/**
* @fileoverview Library for interacting with the OpenRouter API.
* Provides askAi, getCompletion, and getTokenUsage functions.
*/
const https = require('https');
/**
* Sends a chat completion request to the OpenRouter API.
*
 * @param {string} model - The model name (e.g., 'openai/gpt-3.5-turbo', 'google/gemini-pro', etc.).
 * @param {string} token - OpenRouter API key (starts with 'sk-or-...').
* @param {string} systemPrompt - System message for context.
* @param {string} message - User message.
* @returns {Promise<Object>} Resolves with the full JSON response.
*/
async function askAi(model, token, systemPrompt, message) {
  const data = JSON.stringify({
    model: model,
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: message }
    ]
  });

  const options = {
    hostname: 'openrouter.ai',
    path: '/api/v1/chat/completions',
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${token}`,
      'HTTP-Referer': 'https://github.com/JetsadaWijit/npmjs', // Replace with your domain for OpenRouter TOS compliance
      'X-Title': 'Npmjs Library'
    }
  };

  return new Promise((resolve, reject) => {
    const req = https.request(options, (res) => {
      let body = '';
      res.on('data', (chunk) => { body += chunk; });
      res.on('end', () => {
        try {
          resolve(JSON.parse(body));
        } catch (err) {
          reject(err);
        }
      });
    });

    req.on('error', (err) => {
      reject(err);
    });

    req.write(data);
    req.end();
  });
}
/**
* Extracts the AI completion message from the API response.
*
* @param {Object} json - The full JSON response from OpenRouter.
* @returns {string} The completion text or empty string if not found.
*/
function getCompletion(json) {
  if (json && json.choices && json.choices[0] && json.choices[0].message) {
    return String(json.choices[0].message.content || '');
  }
  return '';
}
/**
* Extracts the total token usage from the API response.
*
* @param {Object} json - The full JSON response from OpenRouter.
* @returns {number} Total tokens used, or 0 if not found.
*/
function getTokenUsage(json) {
  if (json && json.usage && typeof json.usage.total_tokens === 'number') {
    return json.usage.total_tokens;
  }
  return 0;
}
module.exports = { askAi, getCompletion, getTokenUsage };
/*
Example usage (e.g., in a separate script that requires this module):

@param model = String (e.g., 'openai/gpt-3.5-turbo')
@param token = String (your OpenRouter API key)
@param systemPrompt = String (system-level prompt, e.g., 'You are an assistant.')
@param message = String (user's prompt)
*/
const { askAi, getCompletion, getTokenUsage } = require('./openrouter');
const model = 'openai/gpt-3.5-turbo';
const token = 'sk-or-xxx-your-openrouter-api-key';
const systemPrompt = 'You are an assistant.';
const message = 'Tell me a joke about cats.';
async function main() {
  const res = await askAi(model, token, systemPrompt, message);
  console.log('Completion:', getCompletion(res));
  console.log('Token Usage:', getTokenUsage(res));
}

main().catch(console.error);
The openrouter.js library offers a simple interface for interacting with the OpenRouter chat API.
Functions:
askAi(model, token, systemPrompt, message): Sends a chat completion request and resolves with the full JSON response.
getCompletion(json): Extracts the completion text from the response, or an empty string if none is present.
getTokenUsage(json): Extracts the total token count from the response usage data, or 0 if none is present.
Parameters:
model: Model name (e.g., openai/gpt-3.5-turbo, google/gemini-pro).
token: Your OpenRouter API key (starts with sk-or-...).
systemPrompt: Instruction for the system role.
message: The user's message.
Output:
askAi returns a promise that resolves with the parsed JSON response, which can then be passed to getCompletion and getTokenUsage.
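As a rough sketch of defensive usage, the helper below (a hypothetical safeAsk wrapper, not part of the library) checks the response before reading it. The error-field check is an assumption about how OpenRouter reports failures in the JSON body; adjust it to the response shape you actually observe.

const { askAi, getCompletion, getTokenUsage } = require('./openrouter');

async function safeAsk(model, token, systemPrompt, message) {
  const res = await askAi(model, token, systemPrompt, message);

  // Assumption: failed requests surface an `error` object in the JSON body.
  if (res && res.error) {
    throw new Error(`OpenRouter request failed: ${res.error.message || 'unknown error'}`);
  }

  const completion = getCompletion(res);
  if (!completion) {
    throw new Error('OpenRouter returned no completion text.');
  }

  return { completion, totalTokens: getTokenUsage(res) };
}

// Usage (placeholder key):
// safeAsk('openai/gpt-3.5-turbo', 'sk-or-xxx', 'You are an assistant.', 'Tell me a joke about cats.')
//   .then(({ completion, totalTokens }) => console.log(completion, totalTokens))
//   .catch(console.error);

Throwing on a missing completion keeps callers from silently printing empty strings when the request fails.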