Skip to content

Commit 30343de

Browse files
committed
Return token count even during execution error
1 parent be849c0 commit 30343de

File tree

3 files changed (+19 additions, −12 deletions)

src/__tests__/unit/agents.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,7 @@ describe('GuardrailAgent', () => {
451451
expect(result.outputInfo.input).toBe('Latest user message with additional context.');
452452
});
453453

454-
it('should handle guardrail execution errors based on raiseGuardrailErrors setting', async () => {
454+
it('should handle guardrail execution errors based on raiseGuardrailErrors setting', async () => {
455455
process.env.OPENAI_API_KEY = 'test';
456456
const config = {
457457
version: 1,

src/__tests__/unit/llm-base.test.ts

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -186,11 +186,11 @@ describe('LLM Base', () => {
186186
expect(result.info.flagged).toBe(false);
187187
expect(result.info.confidence).toBe(0.0);
188188
expect(result.info.error_message).toBe('LLM response validation failed.');
189+
// Token usage is now preserved even when schema validation fails
189190
expect(result.info.token_usage).toEqual({
190-
prompt_tokens: null,
191-
completion_tokens: null,
192-
total_tokens: null,
193-
unavailable_reason: 'LLM call failed before usage could be recorded',
191+
prompt_tokens: 12,
192+
completion_tokens: 4,
193+
total_tokens: 16,
194194
});
195195
});
196196

@@ -235,11 +235,11 @@ describe('LLM Base', () => {
235235
expect(result.info.flagged).toBe(false);
236236
expect(result.info.confidence).toBe(0.0);
237237
expect(result.info.error_message).toBe('LLM returned non-JSON or malformed JSON.');
238+
// Token usage is now preserved even when JSON parsing fails
238239
expect(result.info.token_usage).toEqual({
239-
prompt_tokens: null,
240-
completion_tokens: null,
241-
total_tokens: null,
242-
unavailable_reason: 'LLM call failed before usage could be recorded',
240+
prompt_tokens: 8,
241+
completion_tokens: 3,
242+
total_tokens: 11,
243243
});
244244
});
245245
});

src/checks/llm-base.ts

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,10 @@ export async function runLLM<TOutput extends ZodTypeAny>(
291291
unavailable_reason: 'LLM call failed before usage could be recorded',
292292
});
293293

294+
// Declare tokenUsage outside try block so it's accessible in catch
295+
// when JSON parsing or schema validation fails after a successful API call
296+
let tokenUsage: TokenUsage = noUsage;
297+
294298
try {
295299
// Handle temperature based on model capabilities
296300
let temperature = 0.0;
@@ -319,7 +323,8 @@ export async function runLLM<TOutput extends ZodTypeAny>(
319323
// @ts-ignore - safety_identifier is not in the OpenAI types yet
320324
const response = await client.chat.completions.create(params);
321325

322-
const tokenUsage = extractTokenUsage(response);
326+
// Extract token usage immediately after API call so it's available even if parsing fails
327+
tokenUsage = extractTokenUsage(response);
323328
const result = response.choices[0]?.message?.content;
324329
if (!result) {
325330
return [
@@ -356,6 +361,7 @@ export async function runLLM<TOutput extends ZodTypeAny>(
356361
}
357362

358363
// Fail-open on JSON parsing errors (malformed or non-JSON responses)
364+
// Use tokenUsage here since API call succeeded but response parsing failed
359365
if (error instanceof SyntaxError || (error as Error)?.constructor?.name === 'SyntaxError') {
360366
console.warn('LLM returned non-JSON or malformed JSON.', error);
361367
return [
@@ -366,11 +372,12 @@ export async function runLLM<TOutput extends ZodTypeAny>(
366372
error_message: 'LLM returned non-JSON or malformed JSON.',
367373
},
368374
}),
369-
noUsage,
375+
tokenUsage,
370376
];
371377
}
372378

373379
// Fail-open on schema validation errors (e.g., wrong types like confidence as string)
380+
// Use tokenUsage here since API call succeeded but schema validation failed
374381
if (error instanceof z.ZodError) {
375382
console.warn('LLM response validation failed.', error);
376383
return [
@@ -382,7 +389,7 @@ export async function runLLM<TOutput extends ZodTypeAny>(
382389
zod_issues: error.issues ?? [],
383390
},
384391
}),
385-
noUsage,
392+
tokenUsage,
386393
];
387394
}
388395

Comments (0)