Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 9 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,15 @@ npm run setup
<img width="100%" alt="booking-screen" src="./public/assets/02.gif">

**Currently Supporting:**
- OpenAI API: Get OpenAI API Key [here](https://platform.openai.com/api-keys)
- Gemini API: Get Gemini API Key [here](https://aistudio.google.com/apikey)
- Local LLM Ollama & Whisper

- **OpenAI API**: Get your API Key [here](https://platform.openai.com/api-keys).
- **Gemini API**: Get your API Key [here](https://aistudio.google.com/apikey).
- **Local LLM (Ollama & Whisper)**.
- **Anthropic API**: Get your API Key [here](https://console.anthropic.com/dashboard).
- **OpenRouter API (new)**: Supports models like **Grok 4, Claude 4 Sonnet, Gemini 2.5 Flash, GPT-4.1, LLaMA 4 Maverick** using your **OpenRouter API key**.
Get your API key [here](https://openrouter.ai/settings/keys).

> Note: OpenRouter does not currently support native STT; use OpenAI or Gemini for speech-to-text.

### Liquid Glass Design (coming soon)

Expand Down
15 changes: 14 additions & 1 deletion src/features/common/ai/factory.js
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,18 @@ const PROVIDERS = {
{ id: 'gemini-live-2.5-flash-preview', name: 'Gemini Live 2.5 Flash' }
],
},
'openrouter': {
name: 'OpenRouter',
handler: () => require("./providers/openrouter"),
llmModels: [
{ id: 'x-ai/grok-4', name: 'xAI Grok 4'},
{ id: 'anthropic/claude-sonnet-4', name: 'Anthropic Claude Sonnet(OpenRouter)' },
Comment thread
PhantomInTheWire marked this conversation as resolved.
{ id: 'google/gemini-2.5-flash', name: 'Google Gemini 2.5 Flash(OpenRouter)' },
{ id: 'openai/gpt-4.1', name: 'OpenAI GPT-4.1(OpenRouter)' },
{ id: 'meta-llama/llama-4-maverick', name: 'Meta Llama 4 Maverick(OpenRouter)' },
],
sttModels: [],
},
'anthropic': {
name: 'Anthropic',
handler: () => require("./providers/anthropic"),
Expand Down Expand Up @@ -158,7 +170,8 @@ function getProviderClass(providerId) {
'gemini': 'GeminiProvider',
'deepgram': 'DeepgramProvider',
'ollama': 'OllamaProvider',
'whisper': 'WhisperProvider'
'whisper': 'WhisperProvider',
'openrouter': 'OpenRouterProvider'
};

const className = classNameMap[actualProviderId];
Expand Down
191 changes: 191 additions & 0 deletions src/features/common/ai/providers/openrouter.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
const OpenAI = require('openai');

const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';

class OpenRouterProvider {
    /**
     * Validates an OpenRouter API key: first a cheap local format check
     * (all OpenRouter keys are prefixed 'sk-or-'), then an authenticated
     * request to the /models endpoint to confirm the key is live.
     * @param {string} key - Candidate OpenRouter API key.
     * @returns {Promise<{success: boolean, error?: string}>} Validation result;
     *          never rejects — network failures are reported via `error`.
     */
    static async validateApiKey(key) {
        if (!key || typeof key !== 'string' || !key.startsWith('sk-or-')) {
            return { success: false, error: 'Invalid OpenRouter API key format.' };
        }

        try {
            const response = await fetch(`${OPENROUTER_BASE_URL}/models`, {
                headers: { 'Authorization': `Bearer ${key}` },
                // Fail fast instead of hanging validation on an unreachable network.
                signal: AbortSignal.timeout(10_000),
            });

            if (response.ok) {
                return { success: true };
            }

            // Prefer the server-supplied message when the error body is valid JSON.
            const errorData = await response.json().catch(() => ({}));
            const message = errorData.error?.message || `Validation failed with status: ${response.status}`;
            return { success: false, error: message };
        } catch (error) {
            console.error(`[OpenRouterProvider] Network error during key validation:`, error);
            return { success: false, error: 'A network error occurred during validation.' };
        }
    }
}

/**
 * Builds a placeholder STT session for OpenRouter.
 * OpenRouter exposes no native real-time speech-to-text endpoint, so the
 * returned session is inert: its methods only log and never transcribe.
 * @param {object} opts - Configuration options
 * @param {string} opts.apiKey - OpenRouter API key
 * @param {string} [opts.language='en'] - Language code
 * @param {object} [opts.callbacks] - Event callbacks
 * @returns {Promise<object>} STT session placeholder
 */
async function createSTT({ apiKey, language = "en", callbacks = {}, ...config }) {
    console.warn("[OpenRouter] STT not natively supported. Consider using OpenAI or Gemini for STT.")

    const inertSession = {
        // Accepts audio chunks but deliberately discards them.
        sendRealtimeInput: async (audioData) => {
            console.warn("[OpenRouter] STT sendRealtimeInput called but not implemented")
        },
        // Nothing to tear down; logged only for traceability.
        close: async () => {
            console.log("[OpenRouter] STT session closed")
        },
    }

    return inertSession
}

/**
 * Creates an OpenRouter LLM instance backed by the OpenAI-compatible API.
 * @param {object} opts - Configuration options
 * @param {string} opts.apiKey - OpenRouter API key
 * @param {string} [opts.model='x-ai/grok-4'] - Model name
 * @param {number} [opts.temperature=0.7] - Temperature
 * @param {number} [opts.maxTokens=2048] - Max tokens
 * @returns {object} LLM instance exposing `generateContent` and `chat`
 */
function createLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7, maxTokens = 2048, ...config }) {
    const client = new OpenAI({ apiKey, baseURL: OPENROUTER_BASE_URL });

    // One round-trip to the chat-completions endpoint, normalized to
    // { content, raw }. Errors are logged and rethrown with context.
    const requestCompletion = async (messages) => {
        try {
            const response = await client.chat.completions.create({
                model: model,
                messages: messages,
                temperature: temperature,
                max_tokens: maxTokens
            });

            const choices = response.choices;
            if (!choices || choices.length === 0) {
                throw new Error('No response choices returned from OpenRouter API');
            }

            return {
                content: choices[0].message.content?.trim() || '',
                raw: response
            };
        } catch (error) {
            console.error('[OpenRouter] API call failed:', error);
            throw new Error(`OpenRouter API error: ${error.message}`);
        }
    };

    // Heuristic: a string part that opens with "you are" or mentions "system:"
    // is treated as the (single) system instruction.
    const looksLikeSystemPrompt = (text) => {
        const lowered = text.toLowerCase();
        return lowered.startsWith('you are') || lowered.includes('system:');
    };

    return {
        /**
         * Gemini-style entry point: adapts a mixed list of strings and
         * inline-image parts into OpenAI-format messages, then calls the API.
         * Returns a Gemini-shaped result: `response.text()` plus `raw`.
         */
        generateContent: async (parts) => {
            let systemPrompt = '';
            const userContent = [];

            for (const part of parts) {
                if (typeof part === 'string') {
                    // Only the first system-looking string is promoted.
                    if (systemPrompt === '' && looksLikeSystemPrompt(part)) {
                        systemPrompt = part;
                    } else {
                        userContent.push({ type: 'text', text: part });
                    }
                } else if (part.inlineData) {
                    // Re-encode Gemini inline data as an OpenAI data-URL image part.
                    userContent.push({
                        type: 'image_url',
                        image_url: { url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}` }
                    });
                }
            }

            const messages = [];
            if (systemPrompt) messages.push({ role: 'system', content: systemPrompt });
            if (userContent.length > 0) messages.push({ role: 'user', content: userContent });

            const result = await requestCompletion(messages);

            return {
                response: {
                    text: () => result.content
                },
                raw: result.raw
            };
        },

        // For compatibility with chat-style interfaces
        chat: async (messages) => requestCompletion(messages)
    };
}

/**
 * Creates an OpenRouter streaming LLM instance.
 * @param {object} opts - Configuration options
 * @param {string} opts.apiKey - OpenRouter API key
 * @param {string} [opts.model='x-ai/grok-4'] - Model name
 * @param {number} [opts.temperature=0.7] - Temperature
 * @param {number} [opts.maxTokens=2048] - Max tokens
 * @returns {object} Streaming LLM instance exposing `streamChat`
 */
function createStreamingLLM({ apiKey, model = 'x-ai/grok-4', temperature = 0.7, maxTokens = 2048, ...config }) {
    // Issues a streaming chat-completions request and hands back the raw
    // Response so the caller can consume the SSE body itself.
    const streamChat = async (messages) => {
        console.log("[OpenRouter Provider] Starting Streaming request")

        // Guard before any network work: a usable conversation is required.
        if (!messages || !Array.isArray(messages) || messages.length === 0) {
            throw new Error('Messages array is required and cannot be empty')
        }

        const payload = {
            model,
            messages,
            temperature,
            max_tokens: maxTokens,
            stream: true,
        };

        try {
            const response = await fetch(`${OPENROUTER_BASE_URL}/chat/completions`, {
                method: 'POST',
                headers: {
                    Authorization: `Bearer ${apiKey}`,
                    'Content-Type': 'application/json',
                },
                body: JSON.stringify(payload),
            });

            if (!response.ok) {
                const errorText = await response.text().catch(() => 'Unknown error');
                throw new Error(`OpenRouter API error: ${response.status} ${response.statusText}. ${errorText}`);
            }

            return response;
        } catch (error) {
            console.error('[OpenRouter] Streaming request failed:', error);
            throw error;
        }
    };

    return { streamChat };
}

// Public surface consumed by the provider factory: the key-validation class
// plus the three constructors (STT stub, blocking LLM, streaming LLM).
module.exports = {
    OpenRouterProvider,
    createSTT,
    createLLM,
    createStreamingLLM
};