
Commit 9772c40

add type checking and update method names
1 parent 47a0875 commit 9772c40

File tree

2 files changed (+82, -37)


packages/ai-providers/server-ai-vercel/src/VercelProvider.ts

Lines changed: 48 additions & 37 deletions
```diff
@@ -11,6 +11,9 @@ import type {
 } from '@launchdarkly/server-sdk-ai';
 
 import type {
+  ModelUsageTokens,
+  StreamResponse,
+  TextResponse,
   VercelAIModelParameters,
   VercelAISDKConfig,
   VercelAISDKMapOptions,
@@ -65,21 +68,18 @@ export class VercelProvider extends AIProvider {
    * Invoke the Vercel AI model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Call Vercel AI generateText
     const result = await generateText({
       model: this._model,
       messages,
       ...this._parameters,
     });
 
-    // Create the assistant message
     const assistantMessage: LDMessage = {
       role: 'assistant',
       content: result.text,
     };
 
-    // Extract metrics including token usage and success status
-    const metrics = VercelProvider.createAIMetrics(result);
+    const metrics = VercelProvider.getAIMetricsFromResponse(result);
 
     return {
       message: assistantMessage,
```
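For orientation, a caller-side sketch of the updated `invokeModel` path; how a `VercelProvider` instance is constructed lies outside this diff, so the wiring below is an assumption, not part of the commit.

```typescript
import type { LDMessage } from '@launchdarkly/server-sdk-ai';
// VercelProvider is imported from this package; exact import path omitted.

// Hypothetical caller: invokeModel now reports metrics via
// getAIMetricsFromResponse internally, so the call site is unchanged.
async function ask(provider: VercelProvider, prompt: string): Promise<string> {
  const messages: LDMessage[] = [{ role: 'user', content: prompt }];
  const response = await provider.invokeModel(messages);
  return response.message.content;
}
```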
```diff
@@ -115,17 +115,13 @@ export class VercelProvider extends AIProvider {
 
   /**
    * Map Vercel AI SDK usage data to LaunchDarkly token usage.
-   * Supports both v4 and v5 field names for backward compatibility.
    *
-   * @param usageData Usage data from Vercel AI SDK (may be from usage or totalUsage)
-   * @returns LDTokenUsage or undefined if no usage data provided
+   * @param usageData Usage data from Vercel AI SDK
+   * @returns LDTokenUsage
    */
-  static mapUsageDataToLDTokenUsage(usageData: any): LDTokenUsage | undefined {
-    if (!usageData) {
-      return undefined;
-    }
-
-    const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } = usageData;
+  static mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
+    // Support v4 field names (promptTokens, completionTokens) for backward compatibility
+    const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
     return {
       total: totalTokens ?? 0,
       input: inputTokens ?? promptTokens ?? 0,
```
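The `??` chains give the v5 field names (`inputTokens`, `outputTokens`) priority and fall back to the v4 names. A behavioral sketch with invented counts; the `output` line of the return falls just past this hunk's cut-off, so its fallback is inferred from the destructuring.

```typescript
// v5-style usage maps straight through:
VercelProvider.mapUsageDataToLDTokenUsage({
  totalTokens: 30,
  inputTokens: 10,
  outputTokens: 20,
}); // -> { total: 30, input: 10, output: 20 }

// v4-style usage resolves through the fallbacks:
VercelProvider.mapUsageDataToLDTokenUsage({
  totalTokens: 30,
  promptTokens: 10,
  completionTokens: 20,
}); // -> { total: 30, input: 10, output: 20 }
// (output presumably computed as outputTokens ?? completionTokens ?? 0)
```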
```diff
@@ -134,23 +130,31 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics information from a Vercel AI response.
+   * Get AI metrics from a Vercel AI SDK text response
    * This method extracts token usage information and success status from Vercel AI responses
    * and returns a LaunchDarkly AIMetrics object.
    * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @param response The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   *
+   * @example
+   * const response = await aiConfig.tracker.trackMetricsOf(
+   *   VercelProvider.getAIMetricsFromResponse,
+   *   () => generateText(vercelConfig)
+   * );
    */
-  static createAIMetrics(vercelResponse: any): LDAIMetrics {
-    const finishReason = vercelResponse?.finishReason ?? 'unknown';
-    let usageData: any;
+  static getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
+    const finishReason = response?.finishReason ?? 'unknown';
 
     // favor totalUsage over usage for cumulative usage across all steps
-    if (vercelResponse?.totalUsage) {
-      usageData = vercelResponse?.totalUsage;
-    } else if (vercelResponse?.usage) {
-      usageData = vercelResponse?.usage;
+    let usage: LDTokenUsage | undefined;
+    if (response?.totalUsage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.totalUsage);
+    } else if (response?.usage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.usage);
     }
 
-    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     const success = finishReason !== 'error';
 
     return {
```
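A sketch expanding the `@example` above; `aiConfig` (a LaunchDarkly AI config whose tracker exposes `trackMetricsOf`) and `vercelConfig` (the mapped model and parameters) are assumed to be built elsewhere in the application.

```typescript
import { generateText } from 'ai';
// VercelProvider is imported from this package; exact import path omitted.

// Non-streaming flow: the tracker runs the operation, hands the result to
// getAIMetricsFromResponse, and records the extracted metrics.
async function generateWithTracking(aiConfig: any, vercelConfig: any) {
  const response = await aiConfig.tracker.trackMetricsOf(
    VercelProvider.getAIMetricsFromResponse,
    () => generateText(vercelConfig),
  );
  return response.text;
}
```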
```diff
@@ -160,8 +164,21 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics from a Vercel AI SDK streaming result.
-   * Use this with tracker.trackStreamMetricsOf() for streaming operations like streamText.
+   * Create AI metrics information from a Vercel AI response.
+   * This method extracts token usage information and success status from Vercel AI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param vercelResponse The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(vercelResponse: TextResponse): LDAIMetrics {
+    return VercelProvider.getAIMetricsFromResponse(vercelResponse);
+  }
+
+  /**
+   * Get AI metrics from a Vercel AI SDK streaming result.
    *
    * This method waits for the stream to complete, then extracts metrics using totalUsage
@@ -172,25 +189,22 @@ export class VercelProvider extends AIProvider {
    * @example
    * const stream = aiConfig.tracker.trackStreamMetricsOf(
    *   () => streamText(vercelConfig),
-   *   VercelProvider.createStreamMetrics
+   *   VercelProvider.getAIMetricsFromStream
    * );
-   *
-   * for await (const chunk of stream.textStream) {
-   *   process.stdout.write(chunk);
-   * }
    */
-  static async createStreamMetrics(stream: any): Promise<LDAIMetrics> {
+  static async getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
     const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
 
     // favor totalUsage over usage for cumulative usage across all steps
-    let usageData: any;
+    let usage: LDTokenUsage | undefined;
     if (stream.totalUsage) {
-      usageData = await stream.totalUsage;
+      const usageData = await stream.totalUsage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     } else if (stream.usage) {
-      usageData = await stream.usage;
+      const usageData = await stream.usage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     }
 
-    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     const success = finishReason !== 'error';
 
     return {
```
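The streaming counterpart, rebuilt from the doc-comment examples, including the `textStream` loop that this commit removes from the comment; as before, `aiConfig` and `vercelConfig` are assumed.

```typescript
import { streamText } from 'ai';
// VercelProvider is imported from this package; exact import path omitted.

async function streamWithTracking(aiConfig: any, vercelConfig: any) {
  const stream = aiConfig.tracker.trackStreamMetricsOf(
    () => streamText(vercelConfig),
    VercelProvider.getAIMetricsFromStream,
  );

  // Consume tokens as they arrive; getAIMetricsFromStream awaits
  // finishReason/totalUsage internally, so metrics are recorded only
  // after the stream has finished.
  for await (const chunk of stream.textStream) {
    process.stdout.write(chunk);
  }
}
```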
```diff
@@ -223,15 +237,12 @@ export class VercelProvider extends AIProvider {
 
     const params: VercelAIModelParameters = {};
 
-    // Map token limits
     if (parameters.max_tokens !== undefined) {
       params.maxTokens = parameters.max_tokens as number;
     }
     if (parameters.max_completion_tokens !== undefined) {
       params.maxOutputTokens = parameters.max_completion_tokens as number;
     }
-
-    // Map remaining parameters
     if (parameters.temperature !== undefined) {
       params.temperature = parameters.temperature as number;
     }
```
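For reference, the transformation these branches implement, written out as data with invented values; `maxTokens` is the v4 Vercel field and `maxOutputTokens` the v5 one, so populating both presumably lets either SDK major version pick up the name it understands.

```typescript
// LaunchDarkly model parameters arrive snake_cased...
const ldParameters = {
  max_tokens: 256,
  max_completion_tokens: 512,
  temperature: 0.7,
};

// ...and leave as Vercel AI SDK camelCase parameters
// (VercelAIModelParameters is this package's type).
const mapped: VercelAIModelParameters = {
  maxTokens: 256, // from max_tokens
  maxOutputTokens: 512, // from max_completion_tokens
  temperature: 0.7,
};
```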

packages/ai-providers/server-ai-vercel/src/types.ts

Lines changed: 34 additions & 0 deletions
```diff
@@ -38,3 +38,37 @@ export interface VercelAISDKConfig<TMod> extends VercelAIModelParameters {
   model: TMod;
   messages?: LDMessage[] | undefined;
 }
+
+/**
+ * Token usage information from Vercel AI SDK operations.
+ * Matches the LanguageModelUsage type from the Vercel AI SDK.
+ * Includes v4 field names (promptTokens, completionTokens) for backward compatibility.
+ */
+export interface ModelUsageTokens {
+  inputTokens?: number;
+  outputTokens?: number;
+  totalTokens?: number;
+  reasoningTokens?: number;
+  cachedInputTokens?: number;
+  // v4 backward compatibility field names
+  promptTokens?: number;
+  completionTokens?: number;
+}
+
+/**
+ * Response type for non-streaming Vercel AI SDK operations (e.g., generateText).
+ */
+export interface TextResponse {
+  finishReason?: string;
+  totalUsage?: ModelUsageTokens;
+  usage?: ModelUsageTokens;
+}
+
+/**
+ * Response type for streaming Vercel AI SDK operations (e.g., streamText).
+ */
+export interface StreamResponse {
+  finishReason?: Promise<string>;
+  totalUsage?: Promise<ModelUsageTokens>;
+  usage?: Promise<ModelUsageTokens>;
+}
```
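A design note: `StreamResponse` mirrors the `streamText` result shape, where `finishReason` and the usage fields are Promises that resolve only once the stream ends, which is what lets `getAIMetricsFromStream` double as a completion barrier. A minimal stub, with invented values, to illustrate:

```typescript
// VercelProvider and StreamResponse are imported from this package.
async function demo(): Promise<void> {
  // Stub stream whose promises resolve immediately; a real streamText
  // result resolves these only after the last chunk is emitted.
  const stubStream: StreamResponse = {
    finishReason: Promise.resolve('stop'),
    totalUsage: Promise.resolve({ totalTokens: 42, inputTokens: 30, outputTokens: 12 }),
  };

  const metrics = await VercelProvider.getAIMetricsFromStream(stubStream);
  // metrics.success === true ('stop' !== 'error')
  // metrics.usage -> { total: 42, input: 30, output: 12 }
}
```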
