Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 55 additions & 0 deletions src/api/providers/__tests__/openrouter.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,42 @@ describe("OpenRouterHandler", () => {
const generator = handler.createMessage("test", [])
await expect(generator.next()).rejects.toThrow("OpenRouter API Error 500: API Error")
})

it("handles model not found errors with user-friendly message", async () => {
	const handler = new OpenRouterHandler(mockOptions)

	// Simulate OpenRouter's in-band error reporting: the stream yields an
	// error chunk (404) rather than the SDK throwing.
	const erroringStream = {
		async *[Symbol.asyncIterator]() {
			yield { error: { message: "Model not found", code: 404 } }
		},
	}

	const createMock = vitest.fn().mockResolvedValue(erroringStream)
	;(OpenAI as any).prototype.chat = {
		completions: { create: createMock },
	} as any

	// The generator should surface a user-friendly message naming the model.
	const stream = handler.createMessage("test", [])
	await expect(stream.next()).rejects.toThrow(
		`Model "${mockOptions.openRouterModelId}" is not available on OpenRouter`,
	)
})

it("handles rate limit errors with user-friendly message", async () => {
	const handler = new OpenRouterHandler(mockOptions)

	// Stream that emits a single 429 error chunk instead of completion deltas.
	const erroringStream = {
		async *[Symbol.asyncIterator]() {
			yield { error: { message: "Rate limit exceeded", code: 429 } }
		},
	}

	const createMock = vitest.fn().mockResolvedValue(erroringStream)
	;(OpenAI as any).prototype.chat = {
		completions: { create: createMock },
	} as any

	// Expect the friendly rate-limit message, not the raw API error text.
	const stream = handler.createMessage("test", [])
	await expect(stream.next()).rejects.toThrow("OpenRouter rate limit exceeded")
})
})

describe("completePrompt", () => {
Expand Down Expand Up @@ -308,6 +344,25 @@ describe("OpenRouterHandler", () => {
await expect(handler.completePrompt("test prompt")).rejects.toThrow("OpenRouter API Error 500: API Error")
})

it("handles model not found errors in completePrompt", async () => {
	const handler = new OpenRouterHandler(mockOptions)

	// Non-streaming path: the resolved response itself carries the error
	// object ("Invalid model" / 404) instead of a completion.
	const errorResponse = {
		error: {
			message: "Invalid model",
			code: 404,
		},
	}

	const createMock = vitest.fn().mockResolvedValue(errorResponse)
	;(OpenAI as any).prototype.chat = {
		completions: { create: createMock },
	} as any

	// completePrompt should reject with the friendly model-availability message.
	await expect(handler.completePrompt("test prompt")).rejects.toThrow(
		`Model "${mockOptions.openRouterModelId}" is not available on OpenRouter`,
	)
})

it("handles unexpected errors", async () => {
const handler = new OpenRouterHandler(mockOptions)
const mockCreate = vitest.fn().mockRejectedValue(new Error("Unexpected error"))
Expand Down
136 changes: 105 additions & 31 deletions src/api/providers/openrouter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -137,39 +137,85 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
const stream = await this.client.chat.completions.create(completionParams)

let lastUsage: CompletionUsage | undefined = undefined

for await (const chunk of stream) {
// OpenRouter returns an error object instead of the OpenAI SDK throwing an error.
if ("error" in chunk) {
const error = chunk.error as { message?: string; code?: number }
console.error(`OpenRouter API Error: ${error?.code} - ${error?.message}`)
throw new Error(`OpenRouter API Error ${error?.code}: ${error?.message}`)
}

const delta = chunk.choices[0]?.delta

if ("reasoning" in delta && delta.reasoning && typeof delta.reasoning === "string") {
yield { type: "reasoning", text: delta.reasoning }
}

if (delta?.content) {
yield { type: "text", text: delta.content }
let lastChunkTime = Date.now()
const CHUNK_TIMEOUT_MS = 30000 // 30 seconds timeout between chunks

// Set up a timeout check
const timeoutCheck = setInterval(() => {
const timeSinceLastChunk = Date.now() - lastChunkTime
if (timeSinceLastChunk > CHUNK_TIMEOUT_MS) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The timeout checker clears the interval when no chunk is received for 30 seconds, but it only logs an error and doesn’t abort the stream. This may leave the async generator hanging indefinitely. Consider throwing an error (or using an AbortController) to cancel the stream when a timeout occurs.

clearInterval(timeoutCheck)
console.error(`OpenRouter stream timeout: No chunks received for ${CHUNK_TIMEOUT_MS}ms`, {
modelId,
timeSinceLastChunk,
})
}

if (chunk.usage) {
lastUsage = chunk.usage
}, 5000) // Check every 5 seconds

try {
for await (const chunk of stream) {
lastChunkTime = Date.now() // Reset timeout on each chunk
// OpenRouter returns an error object instead of the OpenAI SDK throwing an error.
if ("error" in chunk) {
const error = chunk.error as { message?: string; code?: number; type?: string }
const errorMessage = error?.message || "Unknown error"
const errorCode = error?.code || "unknown"
const errorType = error?.type || "unknown"

// Log detailed error information
console.error(`OpenRouter API Error:`, {
code: errorCode,
type: errorType,
message: errorMessage,
modelId,
chunk: JSON.stringify(chunk),
})

// Provide more specific error messages for common issues
let userFriendlyMessage = `OpenRouter API Error ${errorCode}: ${errorMessage}`

if (
errorMessage.toLowerCase().includes("model not found") ||
errorMessage.toLowerCase().includes("invalid model") ||
errorCode === 404
) {
userFriendlyMessage = `Model "${modelId}" is not available on OpenRouter. Please check if the model ID is correct and if you have access to this model.`
} else if (errorMessage.toLowerCase().includes("rate limit")) {
userFriendlyMessage = `OpenRouter rate limit exceeded. Please wait a moment and try again.`
} else if (errorMessage.toLowerCase().includes("unauthorized") || errorCode === 401) {
userFriendlyMessage = `OpenRouter authentication failed. Please check your API key.`
}

throw new Error(userFriendlyMessage)
}

const delta = chunk.choices[0]?.delta

if ("reasoning" in delta && delta.reasoning && typeof delta.reasoning === "string") {
yield { type: "reasoning", text: delta.reasoning }
}

if (delta?.content) {
yield { type: "text", text: delta.content }
}

if (chunk.usage) {
lastUsage = chunk.usage
}
}
}

if (lastUsage) {
yield {
type: "usage",
inputTokens: lastUsage.prompt_tokens || 0,
outputTokens: lastUsage.completion_tokens || 0,
cacheReadTokens: lastUsage.prompt_tokens_details?.cached_tokens,
reasoningTokens: lastUsage.completion_tokens_details?.reasoning_tokens,
totalCost: (lastUsage.cost_details?.upstream_inference_cost || 0) + (lastUsage.cost || 0),
if (lastUsage) {
yield {
type: "usage",
inputTokens: lastUsage.prompt_tokens || 0,
outputTokens: lastUsage.completion_tokens || 0,
cacheReadTokens: lastUsage.prompt_tokens_details?.cached_tokens,
reasoningTokens: lastUsage.completion_tokens_details?.reasoning_tokens,
totalCost: (lastUsage.cost_details?.upstream_inference_cost || 0) + (lastUsage.cost || 0),
}
}
} finally {
clearInterval(timeoutCheck)
}
}

Expand Down Expand Up @@ -235,8 +281,36 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
const response = await this.client.chat.completions.create(completionParams)

if ("error" in response) {
const error = response.error as { message?: string; code?: number }
throw new Error(`OpenRouter API Error ${error?.code}: ${error?.message}`)
const error = response.error as { message?: string; code?: number; type?: string }
const errorMessage = error?.message || "Unknown error"
const errorCode = error?.code || "unknown"
const errorType = error?.type || "unknown"

// Log detailed error information
console.error(`OpenRouter API Error:`, {
code: errorCode,
type: errorType,
message: errorMessage,
modelId,
response: JSON.stringify(response),
})

// Provide more specific error messages for common issues
let userFriendlyMessage = `OpenRouter API Error ${errorCode}: ${errorMessage}`

if (
errorMessage.toLowerCase().includes("model not found") ||
errorMessage.toLowerCase().includes("invalid model") ||
errorCode === 404
) {
userFriendlyMessage = `Model "${modelId}" is not available on OpenRouter. Please check if the model ID is correct and if you have access to this model.`
} else if (errorMessage.toLowerCase().includes("rate limit")) {
userFriendlyMessage = `OpenRouter rate limit exceeded. Please wait a moment and try again.`
} else if (errorMessage.toLowerCase().includes("unauthorized") || errorCode === 401) {
userFriendlyMessage = `OpenRouter authentication failed. Please check your API key.`
}

throw new Error(userFriendlyMessage)
}

const completion = response as OpenAI.Chat.ChatCompletion
Expand Down