diff --git a/packages/sdk/server-ai/src/LDAIConfigMapper.ts b/packages/sdk/server-ai/src/LDAIConfigMapper.ts
index 4992dd21f0..f7ed111152 100644
--- a/packages/sdk/server-ai/src/LDAIConfigMapper.ts
+++ b/packages/sdk/server-ai/src/LDAIConfigMapper.ts
@@ -52,6 +52,7 @@ export class LDAIConfigMapper {
       model,
       messages,
       maxTokens: this._findParameter<number>('max_tokens', 'maxTokens'),
+      maxOutputTokens: this._findParameter<number>('max_tokens', 'maxTokens'),
       temperature: this._findParameter<number>('temperature'),
       topP: this._findParameter<number>('top_p', 'topP'),
       topK: this._findParameter<number>('top_k', 'topK'),
diff --git a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
index ec8876b4cf..d820a85168 100644
--- a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
+++ b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
@@ -138,7 +138,9 @@ export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
     TRes extends {
       usage?: {
         totalTokens?: number;
+        inputTokens?: number;
         promptTokens?: number;
+        outputTokens?: number;
         completionTokens?: number;
       };
     },
@@ -161,7 +163,9 @@
       finishReason?: Promise<string>;
       usage?: Promise<{
         totalTokens?: number;
+        inputTokens?: number;
         promptTokens?: number;
+        outputTokens?: number;
         completionTokens?: number;
       }>;
     },
diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
index dfed0fa4db..444060ef32 100644
--- a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
+++ b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
@@ -149,7 +149,9 @@ export interface LDAIConfigTracker {
     TRes extends {
       usage?: {
         totalTokens?: number;
+        inputTokens?: number;
         promptTokens?: number;
+        outputTokens?: number;
         completionTokens?: number;
       };
     },
@@ -174,7 +176,9 @@
       finishReason?: Promise<string>;
       usage?: Promise<{
         totalTokens?: number;
+        inputTokens?: number;
         promptTokens?: number;
+        outputTokens?: number;
         completionTokens?: number;
       }>;
     },
diff --git a/packages/sdk/server-ai/src/api/config/VercelAISDK.ts b/packages/sdk/server-ai/src/api/config/VercelAISDK.ts
index 4387fba06d..8796eb5c95 100644
--- a/packages/sdk/server-ai/src/api/config/VercelAISDK.ts
+++ b/packages/sdk/server-ai/src/api/config/VercelAISDK.ts
@@ -10,6 +10,7 @@ export interface VercelAISDKConfig<TMod> {
   model: TMod;
   messages?: LDMessage[] | undefined;
   maxTokens?: number | undefined;
+  maxOutputTokens?: number | undefined;
   temperature?: number | undefined;
   topP?: number | undefined;
   topK?: number | undefined;
diff --git a/packages/sdk/server-ai/src/api/metrics/VercelAISDKTokenUsage.ts b/packages/sdk/server-ai/src/api/metrics/VercelAISDKTokenUsage.ts
index dbe83a8bf4..20d7281b1c 100644
--- a/packages/sdk/server-ai/src/api/metrics/VercelAISDKTokenUsage.ts
+++ b/packages/sdk/server-ai/src/api/metrics/VercelAISDKTokenUsage.ts
@@ -2,12 +2,14 @@ import { LDTokenUsage } from './LDTokenUsage';
 
 export function createVercelAISDKTokenUsage(data: {
   totalTokens?: number;
+  inputTokens?: number;
   promptTokens?: number;
+  outputTokens?: number;
   completionTokens?: number;
 }): LDTokenUsage {
   return {
     total: data.totalTokens ?? 0,
-    input: data.promptTokens ?? 0,
-    output: data.completionTokens ?? 0,
+    input: data.inputTokens ?? data.promptTokens ?? 0,
+    output: data.outputTokens ?? data.completionTokens ?? 0,
   };
 }