Skip to content

Commit 85d6633

Browse files
fix: exclude undefined cost from providerMetadata (#359)
* fix: exclude undefined cost from providerMetadata

  When cost is undefined, it should not be included in the providerMetadata object since undefined is not a valid JSON value. This was causing AI SDK validation failures when passing providerMetadata back into subsequent conversation turns.

  The fix uses conditional spreading/assignment to only include the cost field when it has a defined value, matching the existing pattern used for other optional fields like costDetails.

  Fixes #262

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

* fix: apply same cost fix to embedding model

  Ensure embedding model also uses conditional spreading for cost field to maintain consistency with chat and completion models.

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

* fix: add providerMetadata to completion model doGenerate

  For consistency with chat model and streaming endpoints, the completion model's doGenerate method now returns providerMetadata with usage info.

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

* fix: add schema validation and provider field to completion/embedding models

  - Completion doGenerate: use OpenRouterProviderMetadataSchema.parse() with provider field
  - Completion doStream: track and include provider field in providerMetadata
  - Completion schema: add provider field
  - Embedding model: include provider field and full usage structure (promptTokens, completionTokens, totalTokens)
  - Embedding schema: add provider field
  - Update embedding test expectations

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

* fix: add schema validation to embedding model for consistency

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

* test: add coverage for completion doGenerate providerMetadata

  Co-Authored-By: Robert Yeakel <robert.yeakel@openrouter.ai>

---------

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
1 parent f2b78f5 commit 85d6633

File tree

9 files changed

+256
-21
lines changed

9 files changed

+256
-21
lines changed

.changeset/dark-pens-float.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"@openrouter/ai-sdk-provider": patch
3+
---
4+
5+
Fix undefined cost field in providerMetadata causing AI SDK validation failures

src/chat/index.test.ts

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -996,7 +996,6 @@ describe('doStream', () => {
996996
completionTokens: 227,
997997
promptTokens: 17,
998998
totalTokens: 244,
999-
cost: undefined,
1000999
},
10011000
},
10021001
},
@@ -1593,7 +1592,6 @@ describe('doStream', () => {
15931592
completionTokens: 17,
15941593
promptTokens: 53,
15951594
totalTokens: 70,
1596-
cost: undefined,
15971595
},
15981596
},
15991597
},
@@ -1712,7 +1710,6 @@ describe('doStream', () => {
17121710
completionTokens: 17,
17131711
promptTokens: 53,
17141712
totalTokens: 70,
1715-
cost: undefined,
17161713
},
17171714
},
17181715
},
@@ -1850,7 +1847,6 @@ describe('doStream', () => {
18501847
completionTokens: 17,
18511848
promptTokens: 53,
18521849
totalTokens: 70,
1853-
cost: undefined,
18541850
},
18551851
},
18561852
},

src/chat/index.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -485,7 +485,9 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
485485
totalTokens:
486486
(usageInfo.inputTokens.total ?? 0) +
487487
(usageInfo.outputTokens.total ?? 0),
488-
cost: response.usage?.cost,
488+
...(response.usage?.cost != null
489+
? { cost: response.usage.cost }
490+
: {}),
489491
...(response.usage?.prompt_tokens_details?.cached_tokens != null
490492
? {
491493
promptTokensDetails: {
@@ -701,7 +703,9 @@ export class OpenRouterChatLanguageModel implements LanguageModelV3 {
701703
};
702704
}
703705

704-
openrouterUsage.cost = value.usage.cost;
706+
if (value.usage.cost != null) {
707+
openrouterUsage.cost = value.usage.cost;
708+
}
705709
openrouterUsage.totalTokens = value.usage.total_tokens;
706710
const upstreamInferenceCost =
707711
value.usage.cost_details?.upstream_inference_cost;

src/completion/index.test.ts

Lines changed: 156 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,19 +70,31 @@ describe('doGenerate', () => {
7070
},
7171
logprobs = null,
7272
finish_reason = 'stop',
73+
provider,
7374
}: {
7475
content?: string;
7576
usage?: {
7677
prompt_tokens: number;
7778
total_tokens: number;
7879
completion_tokens: number;
80+
cost?: number;
81+
prompt_tokens_details?: {
82+
cached_tokens: number;
83+
};
84+
completion_tokens_details?: {
85+
reasoning_tokens: number;
86+
};
87+
cost_details?: {
88+
upstream_inference_cost: number;
89+
};
7990
};
8091
logprobs?: {
8192
tokens: string[];
8293
token_logprobs: number[];
8394
top_logprobs: Record<string, number>[];
8495
} | null;
8596
finish_reason?: string;
97+
provider?: string;
8698
}) {
8799
server.urls['https://openrouter.ai/api/v1/completions']!.response = {
88100
type: 'json-value',
@@ -91,6 +103,7 @@ describe('doGenerate', () => {
91103
object: 'text_completion',
92104
created: 1711363706,
93105
model: 'openai/gpt-3.5-turbo-instruct',
106+
provider,
94107
choices: [
95108
{
96109
text: content,
@@ -146,6 +159,149 @@ describe('doGenerate', () => {
146159
});
147160
});
148161

162+
it('should return providerMetadata with usage and provider', async () => {
163+
prepareJsonResponse({
164+
content: 'Hello',
165+
usage: {
166+
prompt_tokens: 10,
167+
total_tokens: 20,
168+
completion_tokens: 10,
169+
cost: 0.0001,
170+
},
171+
provider: 'openai',
172+
});
173+
174+
const { providerMetadata } = await model.doGenerate({
175+
prompt: TEST_PROMPT,
176+
});
177+
178+
expect(providerMetadata).toStrictEqual({
179+
openrouter: {
180+
provider: 'openai',
181+
usage: {
182+
promptTokens: 10,
183+
completionTokens: 10,
184+
totalTokens: 20,
185+
cost: 0.0001,
186+
},
187+
},
188+
});
189+
});
190+
191+
it('should omit cost from providerMetadata when undefined', async () => {
192+
prepareJsonResponse({
193+
content: 'Hello',
194+
usage: {
195+
prompt_tokens: 10,
196+
total_tokens: 20,
197+
completion_tokens: 10,
198+
},
199+
provider: 'google',
200+
});
201+
202+
const { providerMetadata } = await model.doGenerate({
203+
prompt: TEST_PROMPT,
204+
});
205+
206+
const openrouterMetadata = providerMetadata?.openrouter as {
207+
provider?: string;
208+
usage?: { cost?: number };
209+
};
210+
211+
expect(openrouterMetadata?.provider).toBe('google');
212+
expect(openrouterMetadata?.usage?.cost).toBeUndefined();
213+
expect('cost' in (openrouterMetadata?.usage ?? {})).toBe(false);
214+
});
215+
216+
it('should include cost: 0 in providerMetadata when cost is zero', async () => {
217+
prepareJsonResponse({
218+
content: 'Hello',
219+
usage: {
220+
prompt_tokens: 10,
221+
total_tokens: 20,
222+
completion_tokens: 10,
223+
cost: 0,
224+
},
225+
provider: 'openai',
226+
});
227+
228+
const { providerMetadata } = await model.doGenerate({
229+
prompt: TEST_PROMPT,
230+
});
231+
232+
const openrouterMetadata = providerMetadata?.openrouter as {
233+
usage?: { cost?: number };
234+
};
235+
236+
expect(openrouterMetadata?.usage?.cost).toBe(0);
237+
});
238+
239+
it('should default provider to empty string when not returned by API', async () => {
240+
prepareJsonResponse({
241+
content: 'Hello',
242+
usage: {
243+
prompt_tokens: 10,
244+
total_tokens: 20,
245+
completion_tokens: 10,
246+
},
247+
});
248+
249+
const { providerMetadata } = await model.doGenerate({
250+
prompt: TEST_PROMPT,
251+
});
252+
253+
const openrouterMetadata = providerMetadata?.openrouter as {
254+
provider?: string;
255+
};
256+
257+
expect(openrouterMetadata?.provider).toBe('');
258+
});
259+
260+
it('should include token details in providerMetadata when provided', async () => {
261+
prepareJsonResponse({
262+
content: 'Hello',
263+
usage: {
264+
prompt_tokens: 100,
265+
total_tokens: 150,
266+
completion_tokens: 50,
267+
prompt_tokens_details: {
268+
cached_tokens: 80,
269+
},
270+
completion_tokens_details: {
271+
reasoning_tokens: 20,
272+
},
273+
cost_details: {
274+
upstream_inference_cost: 0.005,
275+
},
276+
},
277+
provider: 'anthropic',
278+
});
279+
280+
const { providerMetadata } = await model.doGenerate({
281+
prompt: TEST_PROMPT,
282+
});
283+
284+
expect(providerMetadata).toStrictEqual({
285+
openrouter: {
286+
provider: 'anthropic',
287+
usage: {
288+
promptTokens: 100,
289+
completionTokens: 50,
290+
totalTokens: 150,
291+
promptTokensDetails: {
292+
cachedTokens: 80,
293+
},
294+
completionTokensDetails: {
295+
reasoningTokens: 20,
296+
},
297+
costDetails: {
298+
upstreamInferenceCost: 0.005,
299+
},
300+
},
301+
},
302+
});
303+
});
304+
149305
it('should extract logprobs', async () => {
150306
prepareJsonResponse({ logprobs: TEST_LOGPROBS });
151307

@@ -341,7 +497,6 @@ describe('doStream', () => {
341497
promptTokens: 10,
342498
completionTokens: 362,
343499
totalTokens: 372,
344-
cost: undefined,
345500
},
346501
},
347502
},

src/completion/index.ts

Lines changed: 63 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ import {
2727
postJsonToApi,
2828
} from '@ai-sdk/provider-utils';
2929
import { openrouterFailedResponseHandler } from '../schemas/error-response';
30+
import { OpenRouterProviderMetadataSchema } from '../schemas/provider-metadata';
3031
import {
3132
createFinishReason,
3233
mapOpenRouterFinishReason,
@@ -220,6 +221,46 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 {
220221
raw: (response.usage as JSONObject) ?? undefined,
221222
},
222223
warnings: [],
224+
providerMetadata: {
225+
openrouter: OpenRouterProviderMetadataSchema.parse({
226+
provider: response.provider ?? '',
227+
usage: {
228+
promptTokens: response.usage?.prompt_tokens ?? 0,
229+
completionTokens: response.usage?.completion_tokens ?? 0,
230+
totalTokens:
231+
(response.usage?.prompt_tokens ?? 0) +
232+
(response.usage?.completion_tokens ?? 0),
233+
...(response.usage?.cost != null
234+
? { cost: response.usage.cost }
235+
: {}),
236+
...(response.usage?.prompt_tokens_details?.cached_tokens != null
237+
? {
238+
promptTokensDetails: {
239+
cachedTokens:
240+
response.usage.prompt_tokens_details.cached_tokens,
241+
},
242+
}
243+
: {}),
244+
...(response.usage?.completion_tokens_details?.reasoning_tokens !=
245+
null
246+
? {
247+
completionTokensDetails: {
248+
reasoningTokens:
249+
response.usage.completion_tokens_details.reasoning_tokens,
250+
},
251+
}
252+
: {}),
253+
...(response.usage?.cost_details?.upstream_inference_cost != null
254+
? {
255+
costDetails: {
256+
upstreamInferenceCost:
257+
response.usage.cost_details.upstream_inference_cost,
258+
},
259+
}
260+
: {}),
261+
},
262+
}),
263+
},
223264
response: {
224265
headers: responseHeaders,
225266
},
@@ -278,9 +319,11 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 {
278319
};
279320

280321
const openrouterUsage: Partial<OpenRouterUsageAccounting> = {};
322+
let provider: string | undefined;
281323

282324
// Track raw usage from the API response for usage.raw
283325
let rawUsage: JSONObject | undefined;
326+
284327
return {
285328
stream: response.pipeThrough(
286329
new TransformStream<
@@ -309,6 +352,10 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 {
309352
return;
310353
}
311354

355+
if (value.provider) {
356+
provider = value.provider;
357+
}
358+
312359
if (value.usage != null) {
313360
usage.inputTokens.total = value.usage.prompt_tokens;
314361
usage.outputTokens.total = value.usage.completion_tokens;
@@ -340,7 +387,9 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 {
340387
};
341388
}
342389

343-
openrouterUsage.cost = value.usage.cost;
390+
if (value.usage.cost != null) {
391+
openrouterUsage.cost = value.usage.cost;
392+
}
344393
openrouterUsage.totalTokens = value.usage.total_tokens;
345394
const upstreamInferenceCost =
346395
value.usage.cost_details?.upstream_inference_cost;
@@ -370,14 +419,24 @@ export class OpenRouterCompletionLanguageModel implements LanguageModelV3 {
370419
// Set raw usage before emitting finish event
371420
usage.raw = rawUsage;
372421

422+
const openrouterMetadata: {
423+
usage: Partial<OpenRouterUsageAccounting>;
424+
provider?: string;
425+
} = {
426+
usage: openrouterUsage,
427+
};
428+
429+
// Only include provider if it's actually set
430+
if (provider !== undefined) {
431+
openrouterMetadata.provider = provider;
432+
}
433+
373434
controller.enqueue({
374435
type: 'finish',
375436
finishReason,
376437
usage,
377438
providerMetadata: {
378-
openrouter: {
379-
usage: openrouterUsage,
380-
},
439+
openrouter: openrouterMetadata,
381440
},
382441
});
383442
},

src/completion/schemas.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ export const OpenRouterCompletionChunkSchema = z.union([
99
.object({
1010
id: z.string().optional(),
1111
model: z.string().optional(),
12+
provider: z.string().optional(),
1213
choices: z.array(
1314
z
1415
.object({

0 commit comments

Comments (0)