Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .changeset/fix-finish-reason-usage-fallback.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
'@openrouter/ai-sdk-provider': patch
---

fix: infer tool-calls finishReason when tool calls present but finish_reason is unknown (#420)

- When finishReason is 'other' (unknown/missing) but tool calls are present, infer 'tool-calls' so agentic loops continue correctly
- Fixes both streaming (doStream) and non-streaming (doGenerate) paths
87 changes: 87 additions & 0 deletions e2e/issues/issue-419-420-finish-reason-usage-fallback.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
/**
* Regression test for GitHub Issues #419 and #420
* https://github.com/OpenRouterTeam/ai-sdk-provider/issues/419
* https://github.com/OpenRouterTeam/ai-sdk-provider/issues/420
*
* Reported error (#419): Standard usage object contains undefined values while
* providerMetadata.openrouter.usage has correct data.
* Model: z-ai/glm-5:free
*
* Reported error (#420): Kimi K2.5 returns undefined finishReason after tool
* calls, breaking agentic loops.
* Model: moonshotai/kimi-k2.5
*
* This test verifies that finishReason is correctly set to 'tool-calls' when
* tool calls are present, and that usage data is populated in the standard
* usage object.
*/
import { generateText, streamText, tool } from 'ai';
import { describe, expect, it, vi } from 'vitest';
import { z } from 'zod/v4';
import { createOpenRouter } from '@/src';

// Live e2e calls against the OpenRouter API can be slow; allow up to
// 2 minutes per test instead of vitest's default timeout.
vi.setConfig({
  testTimeout: 120_000,
});

/**
 * Minimal weather tool used to push the model into a tool-call turn.
 * The canned result keeps e2e assertions deterministic.
 */
const weatherTool = tool({
  description: 'Get the current weather for a location',
  inputSchema: z.object({
    city: z.string().describe('The city to get weather for'),
  }),
  execute: async () => {
    // Fixed payload; the tests only care that a tool call round-trips.
    return { temperature: 22, condition: 'sunny' };
  },
});

describe('Issue #419/#420: finishReason inference and usage population', () => {
  // Live provider. NOTE(review): an unset OPENROUTER_API_BASE would
  // interpolate as the literal string "undefined" in the URL — confirm the
  // e2e environment always provides it.
  const provider = createOpenRouter({
    apiKey: process.env.OPENROUTER_API_KEY,
    baseUrl: `${process.env.OPENROUTER_API_BASE}/api/v1`,
  });

  it('should populate usage in streaming mode (#419)', async () => {
    const streamResult = streamText({
      model: provider('openai/gpt-4o-mini'),
      messages: [
        {
          role: 'user',
          content: 'What is 2+2? Answer with just the number.',
        },
      ],
    });

    // Drain the stream first; usage only resolves once streaming completes.
    await streamResult.consumeStream();
    const usage = await streamResult.usage;

    // Token counts must be real numbers, not undefined (#419 symptom).
    expect(usage.inputTokens).toEqual(expect.any(Number));
    expect(usage.inputTokens).toBeGreaterThan(0);
    expect(usage.outputTokens).toEqual(expect.any(Number));
    expect(usage.outputTokens).toBeGreaterThanOrEqual(0);
  });

  it('should return tool-calls finishReason with tools in generateText (#420)', async () => {
    const generation = await generateText({
      model: provider('openai/gpt-4o-mini'),
      messages: [
        {
          role: 'user',
          content: 'What is the weather in Tokyo? Use the get_weather tool.',
        },
      ],
      tools: {
        get_weather: weatherTool,
      },
    });

    // Usage must be populated (not NaN or undefined).
    expect(generation.usage.inputTokens).toBeGreaterThan(0);
    expect(generation.usage.outputTokens).toBeGreaterThanOrEqual(0);

    // Live responses can vary, so only assert the finishReason when the
    // model actually made a tool call.
    if (generation.toolCalls && generation.toolCalls.length > 0) {
      expect(generation.finishReason).toBe('tool-calls');
    }
  });
});
123 changes: 123 additions & 0 deletions src/chat/index.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -536,6 +536,43 @@ describe('doGenerate', () => {
);
});

it('should infer tool-calls finishReason when finish_reason is unknown but tool calls are present in doGenerate (#420)', async () => {
  // Simulate Kimi K2.5: the provider reports an unrecognized finish_reason
  // alongside tool calls; mapOpenRouterFinishReason maps that to 'other'.
  prepareJsonResponse({
    content: '',
    finish_reason: 'some_unknown_reason',
    tool_calls: [
      {
        id: 'call_kimi_001',
        type: 'function',
        function: {
          name: 'get_weather',
          arguments: '{"city":"Tokyo"}',
        },
      },
    ],
  });

  const generated = await model.doGenerate({
    prompt: TEST_PROMPT,
  });

  // The tool call must survive into the generated content.
  expect(generated.content).toContainEqual(
    expect.objectContaining({
      type: 'tool-call',
      toolCallId: 'call_kimi_001',
      toolName: 'get_weather',
    }),
  );

  // With tool calls present and an 'other' mapping, the result should be
  // 'tool-calls' while preserving the provider's raw reason string.
  expect(generated.finishReason).toStrictEqual({
    unified: 'tool-calls',
    raw: 'some_unknown_reason',
  });
});

it('should default to empty JSON object when tool call arguments field is missing', async () => {
prepareJsonResponse({
content: '',
Expand Down Expand Up @@ -1868,6 +1905,92 @@ describe('doStream', () => {
expect(toolCallEvent?.toolCallId).toBe('call_gemini3_123');
});

it('should infer tool-calls finishReason when finish_reason is missing but tool calls are present (#420)', async () => {
  // Kimi K2.5-style stream: a tool-call delta, a final chunk whose
  // finish_reason stays null, then a usage-only chunk.
  server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
    type: 'stream-chunks',
    chunks: [
      `data: {"id":"chatcmpl-kimi","object":"chat.completion.chunk","created":1711357598,"model":"moonshotai/kimi-k2.5",` +
        `"system_fingerprint":"fp_kimi","choices":[{"index":0,"delta":{"role":"assistant","content":null,` +
        `"tool_calls":[{"index":0,"id":"call_kimi_001","type":"function","function":{"name":"get_weather","arguments":"{\\"city\\":\\"Tokyo\\"}"}}]},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      `data: {"id":"chatcmpl-kimi","object":"chat.completion.chunk","created":1711357598,"model":"moonshotai/kimi-k2.5",` +
        `"system_fingerprint":"fp_kimi","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":null}]}\n\n`,
      `data: {"id":"chatcmpl-kimi","object":"chat.completion.chunk","created":1711357598,"model":"moonshotai/kimi-k2.5",` +
        `"system_fingerprint":"fp_kimi","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":25,"total_tokens":125}}\n\n`,
      'data: [DONE]\n\n',
    ],
  };

  const streamed = await model.doStream({
    tools: [
      {
        type: 'function',
        name: 'get_weather',
        inputSchema: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
          additionalProperties: false,
          $schema: 'http://json-schema.org/draft-07/schema#',
        },
      },
    ],
    prompt: TEST_PROMPT,
  });

  const parts = await convertReadableStreamToArray(streamed.stream);

  // The tool call must be emitted as its own stream part.
  const toolCallPart = parts.find(
    (part): part is LanguageModelV3StreamPart & { type: 'tool-call' } =>
      part.type === 'tool-call',
  );
  expect(toolCallPart?.toolName).toBe('get_weather');

  // Even though the provider never reported a finish_reason, the presence of
  // tool calls should yield a 'tool-calls' finish reason with no raw value.
  const finishPart = parts.find(
    (part): part is LanguageModelV3StreamPart & { type: 'finish' } =>
      part.type === 'finish',
  );
  expect(finishPart?.finishReason).toStrictEqual({
    unified: 'tool-calls',
    raw: undefined,
  });
});

// NOTE(review): the previous title claimed usage is "populated from
// openrouterUsage", but the body asserts the opposite — totals stay
// undefined when no usage chunk arrives. Renamed to match actual behavior.
it('should report undefined usage totals when the provider sends no usage chunk (#419)', async () => {
  server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
    type: 'stream-chunks',
    chunks: [
      // Text content chunk
      `data: {"id":"chatcmpl-kilo","object":"chat.completion.chunk","created":1711357598,"model":"z-ai/glm-5",` +
        `"system_fingerprint":"fp_kilo","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      // Final chunk with finish_reason but NO usage chunk
      `data: {"id":"chatcmpl-kilo","object":"chat.completion.chunk","created":1711357598,"model":"z-ai/glm-5",` +
        `"system_fingerprint":"fp_kilo","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
      'data: [DONE]\n\n',
    ],
  };

  const { stream } = await model.doStream({
    prompt: TEST_PROMPT,
  });

  const elements = await convertReadableStreamToArray(stream);

  const finishEvent = elements.find(
    (el): el is LanguageModelV3StreamPart & { type: 'finish' } =>
      el.type === 'finish',
  );

  // When no usage chunk is sent, standard usage totals stay undefined
  // (rather than surfacing as NaN or a fabricated number).
  expect(finishEvent?.usage.inputTokens.total).toBeUndefined();
  expect(finishEvent?.usage.outputTokens.total).toBeUndefined();
});

it('should stream images', async () => {
server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
type: 'stream-chunks',
Expand Down
18 changes: 16 additions & 2 deletions src/chat/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -433,10 +433,17 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
const shouldOverrideFinishReason =
hasToolCalls && hasEncryptedReasoning && choice.finish_reason === 'stop';

const effectiveFinishReason = shouldOverrideFinishReason
const mappedFinishReason = shouldOverrideFinishReason
? createFinishReason('tool-calls', choice.finish_reason ?? undefined)
: mapOpenRouterFinishReason(choice.finish_reason);

// Fix for #420: When finishReason is 'other' (unknown/missing) but tool calls
// were made, infer 'tool-calls' so agentic loops continue correctly.
const effectiveFinishReason =
hasToolCalls && mappedFinishReason.unified === 'other'
? createFinishReason('tool-calls', mappedFinishReason.raw)
: mappedFinishReason;

return {
content,
finishReason: effectiveFinishReason,
Expand Down Expand Up @@ -1026,10 +1033,11 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
},

flush(controller) {
const hasToolCalls = toolCalls.length > 0;

// Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted
// reasoning (thoughtSignature), the model returns 'stop' but expects continuation.
// Override to 'tool-calls' so the SDK knows to continue the conversation.
const hasToolCalls = toolCalls.length > 0;
const hasEncryptedReasoning = accumulatedReasoningDetails.some(
(d) => d.type === ReasoningDetailType.Encrypted && d.data,
);
Expand All @@ -1041,6 +1049,12 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
finishReason = createFinishReason('tool-calls', finishReason.raw);
}

// Fix for #420: When finishReason is 'other' (unknown/missing) but tool calls
// were made, infer 'tool-calls' so agentic loops continue correctly.
if (hasToolCalls && finishReason.unified === 'other') {
finishReason = createFinishReason('tool-calls', finishReason.raw);
}

// Forward any unsent tool calls if finish reason is 'tool-calls'
if (finishReason.unified === 'tool-calls') {
for (const toolCall of toolCalls) {
Expand Down