Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .changeset/fix-reasoning-end-signature.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
"@openrouter/ai-sdk-provider": patch
---

fix: include accumulated reasoning_details with signature in reasoning-end stream event

When streaming a text-only response (no tool calls) with reasoning enabled, the reasoning-end event now includes the accumulated reasoning_details (with signature) in providerMetadata. This fixes multi-turn conversation failures with Anthropic models where the signature was lost, causing "Invalid signature in thinking block" errors on subsequent turns.
132 changes: 132 additions & 0 deletions e2e/issues/issue-394-reasoning-end-signature.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
/**
* Regression test for GitHub PR #394
* https://github.com/OpenRouterTeam/ai-sdk-provider/pull/394
*
* Reported error: Multi-turn conversation failure with Anthropic models when
* the first turn is a text-only response (no tool calls) with reasoning enabled.
* The reasoning-end stream event was emitted without providerMetadata, causing
* the Anthropic signature to be lost. On the next turn, Anthropic rejects with
* "Invalid signature in thinking block".
*/
import { streamText } from 'ai';
import { describe, expect, it, vi } from 'vitest';
import { createOpenRouter } from '@/src';

// These tests hit the live OpenRouter API with reasoning enabled, which can
// be slow; give each test up to two minutes before timing out.
vi.setConfig({
testTimeout: 120_000,
});

describe('Issue #394: reasoning-end should include accumulated reasoning_details with signature', () => {
  const provider = createOpenRouter({
    apiKey: process.env.OPENROUTER_API_KEY,
    baseUrl: `${process.env.OPENROUTER_API_BASE}/api/v1`,
  });

  it('should include reasoning_details with signature in reasoning-end providerMetadata for text-only streaming response', async () => {
    const model = provider('anthropic/claude-sonnet-4');

    const stream = streamText({
      model,
      prompt: 'What is 2+2? Answer briefly.',
      providerOptions: {
        openrouter: {
          reasoning: 'enabled',
        },
      },
    });

    // Observe the full stream: we need to know that a reasoning block
    // started, that it ended, what text it produced, and what metadata
    // rode along on the reasoning-end event.
    let sawReasoningStart = false;
    let sawReasoningEnd = false;
    let endEventMetadata: Record<string, unknown> | undefined;
    let reasoningText = '';

    for await (const part of stream.fullStream) {
      switch (part.type) {
        case 'reasoning-start':
          sawReasoningStart = true;
          break;
        case 'reasoning-delta':
          reasoningText += part.text;
          break;
        case 'reasoning-end':
          sawReasoningEnd = true;
          endEventMetadata = part.providerMetadata as
            | Record<string, unknown>
            | undefined;
          break;
        default:
          break;
      }
    }

    expect(sawReasoningStart).toBe(true);
    expect(sawReasoningEnd).toBe(true);
    expect(reasoningText.length).toBeGreaterThan(0);

    expect(endEventMetadata).toBeDefined();

    const openrouterMeta = endEventMetadata?.openrouter as
      | Record<string, unknown>
      | undefined;
    expect(openrouterMeta).toBeDefined();

    const reasoningDetails = openrouterMeta?.reasoning_details as
      | Array<Record<string, unknown>>
      | undefined;
    expect(reasoningDetails).toBeDefined();
    expect(reasoningDetails!.length).toBeGreaterThan(0);

    // The text detail must carry a non-empty signature; Anthropic rejects
    // later turns when the signature is missing from the thinking block.
    const textDetail = reasoningDetails!.find(
      (detail) => detail.type === 'reasoning.text',
    );
    expect(textDetail).toBeDefined();
    expect(textDetail!.signature).toBeDefined();
    expect(typeof textDetail!.signature).toBe('string');
    expect((textDetail!.signature as string).length).toBeGreaterThan(0);
  });

  it('should produce valid reasoning parts for multi-turn continuation', async () => {
    const model = provider('anthropic/claude-sonnet-4');

    const result = await streamText({
      model,
      prompt: 'What is the capital of France? Answer in one word.',
      providerOptions: {
        openrouter: {
          reasoning: 'enabled',
        },
      },
    });

    const response = await result.response;
    const messages = response.messages;

    expect(messages.length).toBeGreaterThan(0);

    const assistantMessage = messages.find((m) => m.role === 'assistant');
    expect(assistantMessage).toBeDefined();

    const content = assistantMessage?.content;
    // A plain-string content has no structured reasoning parts to inspect.
    if (typeof content === 'string') {
      return;
    }

    const reasoningParts = content?.filter(
      (p: { type: string }) => p.type === 'reasoning',
    );

    // When reasoning parts are present, each one that exposes
    // providerMetadata must include openrouter.reasoning_details so the
    // signature survives into the next conversation turn.
    if (reasoningParts && reasoningParts.length > 0) {
      for (const part of reasoningParts) {
        if ('providerMetadata' in part) {
          expect(part.providerMetadata).toBeDefined();

          const openrouterMeta = (
            part as { providerMetadata?: Record<string, unknown> }
          ).providerMetadata?.openrouter as Record<string, unknown> | undefined;
          expect(openrouterMeta).toBeDefined();

          const details = openrouterMeta?.reasoning_details as
            | Array<Record<string, unknown>>
            | undefined;
          expect(details).toBeDefined();
        }
      }
    }
  });
});
98 changes: 98 additions & 0 deletions src/chat/index.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2308,6 +2308,104 @@ describe('doStream', () => {
expect(openrouterMetadata?.annotations?.[0]?.file.hash).toBe('hash1');
expect(openrouterMetadata?.annotations?.[1]?.file.hash).toBe('hash2');
});

it('should include accumulated reasoning_details with signature in reasoning-end providerMetadata for text-only responses', async () => {
  // Regression coverage for the Anthropic multi-turn signature bug.
  // In a text-only streamed response (no tool calls) the signature only
  // arrives on the LAST reasoning delta, while reasoning-start carries the
  // FIRST delta's metadata (no signature). The AI SDK overwrites the
  // reasoning part's providerMetadata from the reasoning-end event, so the
  // provider must attach the fully accumulated reasoning_details there —
  // signature included. Otherwise the persisted reasoning part has no
  // signature and the next turn fails with "Invalid signature in thinking
  // block".
  server.urls['https://openrouter.ai/api/v1/chat/completions']!.response = {
    type: 'stream-chunks',
    chunks: [
      // Chunk 1: reasoning begins — no signature yet.
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[{"index":0,"delta":{"role":"assistant","content":"",` +
        `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":"Let me think about this","index":0,"format":"anthropic-claude-v1"}]},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      // Chunk 2: additional reasoning text — still no signature.
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[{"index":0,"delta":{` +
        `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":" step by step.","index":0,"format":"anthropic-claude-v1"}]},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      // Chunk 3: final reasoning delta, now carrying the signature.
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[{"index":0,"delta":{` +
        `"reasoning_details":[{"type":"${ReasoningDetailType.Text}","text":" Done.","index":0,"format":"anthropic-claude-v1","signature":"erX9OCAqSEO90HsfvNlBn5J3BQ9cEI/Hg2wHFo5iA8w3L+a"}]},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      // Chunk 4: visible text starts, which closes the reasoning part.
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[{"index":0,"delta":{"content":"Hello! How can I help?"},` +
        `"logprobs":null,"finish_reason":null}]}\n\n`,
      // Terminal chunks: stop reason, usage, and stream sentinel.
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[{"index":0,"delta":{},` +
        `"logprobs":null,"finish_reason":"stop"}]}\n\n`,
      `data: {"id":"chatcmpl-sig-test","object":"chat.completion.chunk","created":1711357598,"model":"anthropic/claude-opus-4.6",` +
        `"system_fingerprint":"fp_test","choices":[],"usage":{"prompt_tokens":100,"completion_tokens":50,"total_tokens":150}}\n\n`,
      'data: [DONE]\n\n',
    ],
  };

  const { stream } = await model.doStream({
    prompt: TEST_PROMPT,
  });

  const streamParts = await convertReadableStreamToArray(stream);

  // Locate the reasoning-end event the provider emitted.
  const reasoningEndPart = streamParts.find(
    (
      part,
    ): part is Extract<LanguageModelV3StreamPart, { type: 'reasoning-end' }> =>
      part.type === 'reasoning-end',
  );

  expect(reasoningEndPart).toBeDefined();

  // reasoning-end must expose providerMetadata containing the accumulated
  // reasoning_details with the signature from the last delta; the AI SDK
  // persists the reasoning part's metadata from exactly this event.
  expect(reasoningEndPart?.providerMetadata).toBeDefined();

  const endMeta = reasoningEndPart?.providerMetadata?.openrouter as
    | { reasoning_details: ReasoningDetailUnion[] }
    | undefined;
  const accumulatedDetails = endMeta?.reasoning_details;

  expect(accumulatedDetails).toBeDefined();
  expect(accumulatedDetails).toHaveLength(1);
  expect(accumulatedDetails?.[0]).toMatchObject({
    type: ReasoningDetailType.Text,
    text: 'Let me think about this step by step. Done.',
    signature: 'erX9OCAqSEO90HsfvNlBn5J3BQ9cEI/Hg2wHFo5iA8w3L+a',
    format: 'anthropic-claude-v1',
  });

  // The terminal finish event should report the same accumulated details.
  const finishPart = streamParts.find(
    (part): part is Extract<LanguageModelV3StreamPart, { type: 'finish' }> =>
      part.type === 'finish',
  );

  const finishMeta = finishPart?.providerMetadata?.openrouter as
    | { reasoning_details: ReasoningDetailUnion[] }
    | undefined;
  const finishDetails = finishMeta?.reasoning_details;

  expect(finishDetails).toHaveLength(1);
  expect(finishDetails?.[0]).toMatchObject({
    type: ReasoningDetailType.Text,
    text: 'Let me think about this step by step. Done.',
    signature: 'erX9OCAqSEO90HsfvNlBn5J3BQ9cEI/Hg2wHFo5iA8w3L+a',
  });
});
});

describe('debug settings', () => {
Expand Down
22 changes: 22 additions & 0 deletions src/chat/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -822,6 +822,18 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
controller.enqueue({
type: 'reasoning-end',
id: reasoningId || generateId(),
// Include accumulated reasoning_details so the AI SDK can update
// the reasoning part's providerMetadata with the correct signature.
// The signature typically arrives in the last reasoning delta,
// but reasoning-start only carries the first delta's metadata.
providerMetadata:
accumulatedReasoningDetails.length > 0
? {
openrouter: {
reasoning_details: accumulatedReasoningDetails,
},
}
: undefined,
});
reasoningStarted = false; // Mark as ended so we don't end it again in flush
}
Expand Down Expand Up @@ -1096,6 +1108,16 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
controller.enqueue({
type: 'reasoning-end',
id: reasoningId || generateId(),
// Include accumulated reasoning_details so the AI SDK can update
// the reasoning part's providerMetadata with the correct signature.
providerMetadata:
accumulatedReasoningDetails.length > 0
? {
openrouter: {
reasoning_details: accumulatedReasoningDetails,
},
}
: undefined,
});
}
if (textStarted) {
Expand Down