Skip to content

Commit 0f8ae2b

Browse files
fix(extensions): handle NaN usage tokens
1 parent bbce5a7 commit 0f8ae2b

File tree

2 files changed

+91
-5
lines changed

2 files changed

+91
-5
lines changed

packages/agents-extensions/src/aiSdk.ts

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -443,10 +443,19 @@ export class AiSdkModel implements Model {
443443
return {
444444
responseId: result.response?.id ?? 'FAKE_ID',
445445
usage: new Usage({
446-
inputTokens: result.usage.promptTokens,
447-
outputTokens: result.usage.completionTokens,
446+
inputTokens: Number.isNaN(result.usage?.promptTokens)
447+
? 0
448+
: (result.usage?.promptTokens ?? 0),
449+
outputTokens: Number.isNaN(result.usage?.completionTokens)
450+
? 0
451+
: (result.usage?.completionTokens ?? 0),
448452
totalTokens:
449-
result.usage.promptTokens + result.usage.completionTokens,
453+
(Number.isNaN(result.usage?.promptTokens)
454+
? 0
455+
: (result.usage?.promptTokens ?? 0)) +
456+
(Number.isNaN(result.usage?.completionTokens)
457+
? 0
458+
: (result.usage?.completionTokens ?? 0)),
450459
}),
451460
output,
452461
};
@@ -608,8 +617,12 @@ export class AiSdkModel implements Model {
608617
break;
609618
}
610619
case 'finish': {
611-
usagePromptTokens = part.usage.promptTokens;
612-
usageCompletionTokens = part.usage.completionTokens;
620+
usagePromptTokens = Number.isNaN(part.usage?.promptTokens)
621+
? 0
622+
: (part.usage?.promptTokens ?? 0);
623+
usageCompletionTokens = Number.isNaN(part.usage?.completionTokens)
624+
? 0
625+
: (part.usage?.completionTokens ?? 0);
613626
break;
614627
}
615628
case 'error': {

packages/agents-extensions/test/aiSdk.test.ts

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -468,6 +468,42 @@ describe('AiSdkModel.getResponse', () => {
468468
content: 'inst',
469469
});
470470
});
471+
472+
test('handles NaN usage in doGenerate', async () => {
473+
const model = new AiSdkModel(
474+
stubModel({
475+
async doGenerate() {
476+
return {
477+
text: '',
478+
finishReason: 'stop',
479+
usage: { promptTokens: Number.NaN, completionTokens: Number.NaN },
480+
providerMetadata: {},
481+
rawCall: { rawPrompt: '', rawSettings: {} },
482+
};
483+
},
484+
}),
485+
);
486+
487+
const res = await withTrace('t', () =>
488+
model.getResponse({
489+
input: 'hi',
490+
tools: [],
491+
handoffs: [],
492+
modelSettings: {},
493+
outputType: 'text',
494+
tracing: false,
495+
} as any),
496+
);
497+
498+
expect(res.usage).toEqual({
499+
requests: 1,
500+
inputTokens: 0,
501+
outputTokens: 0,
502+
totalTokens: 0,
503+
inputTokensDetails: [],
504+
outputTokensDetails: [],
505+
});
506+
});
471507
});
472508

473509
describe('AiSdkModel.getStreamedResponse', () => {
@@ -639,6 +675,43 @@ describe('AiSdkModel.getStreamedResponse', () => {
639675
content: 'inst',
640676
});
641677
});
678+
679+
test('handles NaN usage in stream finish event', async () => {
680+
const parts = [
681+
{ type: 'text-delta', textDelta: 'a' },
682+
{
683+
type: 'finish',
684+
finishReason: 'stop',
685+
usage: { promptTokens: Number.NaN, completionTokens: Number.NaN },
686+
},
687+
];
688+
const model = new AiSdkModel(
689+
stubModel({
690+
async doStream() {
691+
return {
692+
stream: partsStream(parts),
693+
rawCall: { rawPrompt: '', rawSettings: {} },
694+
} as any;
695+
},
696+
}),
697+
);
698+
699+
let final: any;
700+
for await (const ev of model.getStreamedResponse({
701+
input: 'hi',
702+
tools: [],
703+
handoffs: [],
704+
modelSettings: {},
705+
outputType: 'text',
706+
tracing: false,
707+
} as any)) {
708+
if (ev.type === 'response_done') {
709+
final = ev.response.usage;
710+
}
711+
}
712+
713+
expect(final).toEqual({ inputTokens: 0, outputTokens: 0, totalTokens: 0 });
714+
});
642715
});
643716

644717
describe('AiSdkModel', () => {

0 commit comments

Comments (0)