From 9393389fe8cf34a8147ac7b0fbdf97dcc64e0360 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Fri, 31 Oct 2025 02:08:04 +0000
Subject: [PATCH 1/4] feat: Add toVercelAISDK method to support easy model
 creation

fix!: VercelProvider now requires type safe parameters for Vercel models
fix: Properly convert LD model parameters to Vercel model parameters
---
 .../server-ai-vercel/src/VercelProvider.ts | 170 ++++++++++++++++--
 .../server-ai-vercel/src/index.ts          |   6 +
 .../server-ai-vercel/src/types.ts          |  41 +++++
 3 files changed, 207 insertions(+), 10 deletions(-)
 create mode 100644 packages/ai-providers/server-ai-vercel/src/types.ts

diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index fc13ae701a..d7611d3ec8 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -10,30 +10,50 @@ import type {
   LDTokenUsage,
 } from '@launchdarkly/server-sdk-ai';
 
+import type {
+  VercelAIModelParameters,
+  VercelAISDKConfig,
+  VercelAISDKMapOptions,
+  VercelAISDKProvider,
+} from './types';
+
 /**
  * Vercel AI implementation of AIProvider.
  * This provider integrates Vercel AI SDK with LaunchDarkly's tracking capabilities.
  */
 export class VercelProvider extends AIProvider {
   private _model: LanguageModel;
-  private _parameters: Record<string, unknown>;
+  private _parameters: VercelAIModelParameters;
 
-  constructor(model: LanguageModel, parameters: Record<string, unknown>, logger?: LDLogger) {
+  /**
+   * Constructor for the VercelProvider.
+   * @param model - The Vercel AI model to use.
+   * @param parameters - The Vercel AI model parameters.
+   * @param logger - The logger to use for the Vercel AI provider.
+   */
+  constructor(model: LanguageModel, parameters: VercelAIModelParameters, logger?: LDLogger) {
     super(logger);
     this._model = model;
     this._parameters = parameters;
   }
 
   // =============================================================================
-  // MAIN FACTORY METHOD
+  // MAIN FACTORY METHODS
   // =============================================================================
 
   /**
    * Static factory method to create a Vercel AIProvider from an AI configuration.
+   * This method auto-detects the provider and creates the model.
+   * Note: Messages from the AI config are not included in the provider - messages
+   * should be passed at invocation time via invokeModel().
+   * 
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @param logger Optional logger
+   * @returns A Promise that resolves to a configured VercelProvider
    */
   static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<VercelProvider> {
     const model = await VercelProvider.createVercelModel(aiConfig);
-    const parameters = aiConfig.model?.parameters || {};
+    const parameters = VercelProvider.mapParameters(aiConfig.model?.parameters);
     return new VercelProvider(model, parameters, logger);
   }
 
@@ -46,8 +66,6 @@ export class VercelProvider extends AIProvider {
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
     // Call Vercel AI generateText
-    // Type assertion: our MinLanguageModel is compatible with the expected LanguageModel interface
-    // The generateText function will work with any object that has the required properties
     const result = await generateText({
       model: this._model,
       messages,
@@ -121,9 +139,144 @@ export class VercelProvider extends AIProvider {
     };
   }
 
+  /**
+   * Create a metrics extractor for Vercel AI SDK streaming results.
+   * Use this with tracker.trackStreamMetricsOf() for streaming operations like streamText.
+   *
+   * The extractor waits for the stream's response promise to resolve, then extracts
+   * metrics from the completed response.
+   *
+   * @returns A metrics extractor function for streaming results
+   *
+   * @example
+   * const stream = aiConfig.tracker.trackStreamMetricsOf(
+   *   () => streamText(vercelConfig),
+   *   VercelProvider.createStreamMetricsExtractor()
+   * );
+   *
+   * for await (const chunk of stream.textStream) {
+   *   process.stdout.write(chunk);
+   * }
+   */
+  static createStreamMetricsExtractor() {
+    return async (stream: any): Promise<LDAIMetrics> => {
+      // Wait for stream to complete
+      const result = await stream.response;
+      // Extract metrics from completed response
+      return VercelProvider.createAIMetrics(result);
+    };
+  }
+
+  /**
+   * Map LaunchDarkly model parameters to Vercel AI SDK parameters.
+   * 
+   * Parameter mappings:
+   * - max_tokens → maxTokens
+   * - max_completion_tokens → maxOutputTokens
+   * - temperature → temperature
+   * - top_p → topP
+   * - top_k → topK
+   * - presence_penalty → presencePenalty
+   * - frequency_penalty → frequencyPenalty
+   * - stop → stopSequences
+   * - seed → seed
+   *
+   * @param parameters The LaunchDarkly model parameters to map
+   * @returns An object containing mapped Vercel AI SDK parameters
+   */
+  static mapParameters(parameters?: { [index: string]: unknown }): VercelAIModelParameters {
+    if (!parameters) {
+      return {};
+    }
+
+    const params: VercelAIModelParameters = {};
+
+    // Map token limits
+    if (parameters.max_tokens !== undefined) {
+      params.maxTokens = parameters.max_tokens as number;
+    }
+    if (parameters.max_completion_tokens !== undefined) {
+      params.maxOutputTokens = parameters.max_completion_tokens as number;
+    }
+
+    // Map remaining parameters
+    if (parameters.temperature !== undefined) {
+      params.temperature = parameters.temperature as number;
+    }
+    if (parameters.top_p !== undefined) {
+      params.topP = parameters.top_p as number;
+    }
+    if (parameters.top_k !== undefined) {
+      params.topK = parameters.top_k as number;
+    }
+    if (parameters.presence_penalty !== undefined) {
+      params.presencePenalty = parameters.presence_penalty as number;
+    }
+    if (parameters.frequency_penalty !== undefined) {
+      params.frequencyPenalty = parameters.frequency_penalty as number;
+    }
+    if (parameters.stop !== undefined) {
+      params.stopSequences = parameters.stop as string[];
+    }
+    if (parameters.seed !== undefined) {
+      params.seed = parameters.seed as number;
+    }
+
+    return params;
+  }
+
+  /**
+   * Convert an AI configuration to Vercel AI SDK parameters.
+   * This static method allows converting an LDAIConfig to VercelAISDKConfig without
+   * requiring an instance of VercelProvider.
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @param provider A Vercel AI SDK Provider or a map of provider names to Vercel AI SDK Providers
+   * @param options Optional mapping options
+   * @returns A configuration directly usable in Vercel AI SDK generateText() and streamText()
+   * @throws {Error} if a Vercel AI SDK model cannot be determined from the given provider parameter
+   */
+  static toVercelAISDK<TMod>(
+    aiConfig: LDAIConfig,
+    provider: VercelAISDKProvider<TMod> | Record<string, VercelAISDKProvider<TMod>>,
+    options?: VercelAISDKMapOptions | undefined,
+  ): VercelAISDKConfig<TMod> {
+    // Determine the model from the provider
+    let model: TMod | undefined;
+    if (typeof provider === 'function') {
+      model = provider(aiConfig.model?.name ?? '');
+    } else {
+      model = provider[aiConfig.provider?.name ?? '']?.(aiConfig.model?.name ?? '');
+    }
+    if (!model) {
+      throw new Error(
+        'Vercel AI SDK model cannot be determined from the supplied provider parameter.',
+      );
+    }
+
+    // Merge messages from config and options
+    let messages: LDMessage[] | undefined;
+    const configMessages = ('messages' in aiConfig ? aiConfig.messages : undefined) as
+      | LDMessage[]
+      | undefined;
+    if (configMessages || options?.nonInterpolatedMessages) {
+      messages = [...(configMessages ?? []), ...(options?.nonInterpolatedMessages ?? [])];
+    }
+
+    // Map parameters using the shared mapping method
+    const params = VercelProvider.mapParameters(aiConfig.model?.parameters);
+
+    // Build and return the Vercel AI SDK configuration
+    return {
+      model,
+      messages,
+      ...params,
+    };
+  }
+
   /**
    * Create a Vercel AI model from an AI configuration.
-   * This method creates a Vercel AI model based on the provider configuration.
+   * This method auto-detects the provider and creates the model instance.
    *
    * @param aiConfig The LaunchDarkly AI configuration
    * @returns A Promise that resolves to a configured Vercel AI model
@@ -131,9 +284,6 @@ export class VercelProvider extends AIProvider {
   static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
     const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
     const modelName = aiConfig.model?.name || '';
-    // Parameters are not used in model creation but kept for future use
-    // eslint-disable-next-line @typescript-eslint/no-unused-vars
-    const parameters = aiConfig.model?.parameters || {};
 
     // Map provider names to their corresponding Vercel AI SDK imports
     switch (providerName) {
diff --git a/packages/ai-providers/server-ai-vercel/src/index.ts b/packages/ai-providers/server-ai-vercel/src/index.ts
index 3dde0dc683..6e7eb55023 100644
--- a/packages/ai-providers/server-ai-vercel/src/index.ts
+++ b/packages/ai-providers/server-ai-vercel/src/index.ts
@@ -1 +1,7 @@
 export { VercelProvider } from './VercelProvider';
+export type {
+  VercelAIModelParameters,
+  VercelAISDKConfig,
+  VercelAISDKMapOptions,
+  VercelAISDKProvider,
+} from './types';
diff --git a/packages/ai-providers/server-ai-vercel/src/types.ts b/packages/ai-providers/server-ai-vercel/src/types.ts
new file mode 100644
index 0000000000..da4bf17186
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/types.ts
@@ -0,0 +1,41 @@
+import type { LDMessage } from '@launchdarkly/server-sdk-ai';
+
+/**
+ * Vercel AI SDK Provider type - a function that takes a model name and returns a model instance.
+ */
+export type VercelAISDKProvider<TMod> = (modelName: string) => TMod;
+
+/**
+ * Options for mapping to Vercel AI SDK configuration.
+ */
+export interface VercelAISDKMapOptions {
+  /**
+   * Additional messages that should not be interpolated.
+   */
+  nonInterpolatedMessages?: LDMessage[] | undefined;
+}
+
+/**
+ * Vercel AI SDK model parameters.
+ * These are the parameters that can be passed to Vercel AI SDK methods like generateText() and streamText().
+ */
+export interface VercelAIModelParameters {
+  maxTokens?: number | undefined;
+  maxOutputTokens?: number | undefined;
+  temperature?: number | undefined;
+  topP?: number | undefined;
+  topK?: number | undefined;
+  presencePenalty?: number | undefined;
+  frequencyPenalty?: number | undefined;
+  stopSequences?: string[] | undefined;
+  seed?: number | undefined;
+}
+
+/**
+ * Configuration format compatible with Vercel AI SDK's generateText() and streamText() methods.
+ */
+export interface VercelAISDKConfig<TMod> extends VercelAIModelParameters {
+  model: TMod;
+  messages?: LDMessage[] | undefined;
+}

From 15e377415a9a4f78aac68997a3c2901f1a8dd433 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Fri, 31 Oct 2025 17:18:11 +0000
Subject: [PATCH 2/4] fix lint errors in the vercel ai provider

---
 packages/ai-providers/server-ai-vercel/src/VercelProvider.ts | 4 ++--
 packages/ai-providers/server-ai-vercel/src/types.ts          | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index d7611d3ec8..97f1336111 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -46,7 +46,7 @@ export class VercelProvider extends AIProvider {
    * This method auto-detects the provider and creates the model.
    * Note: Messages from the AI config are not included in the provider - messages
    * should be passed at invocation time via invokeModel().
-   * 
+   *
    * @param aiConfig The LaunchDarkly AI configuration
    * @param logger Optional logger
    * @returns A Promise that resolves to a configured VercelProvider
@@ -169,7 +169,7 @@ export class VercelProvider extends AIProvider {
 
   /**
    * Map LaunchDarkly model parameters to Vercel AI SDK parameters.
-   * 
+   *
    * Parameter mappings:
    * - max_tokens → maxTokens
    * - max_completion_tokens → maxOutputTokens
diff --git a/packages/ai-providers/server-ai-vercel/src/types.ts b/packages/ai-providers/server-ai-vercel/src/types.ts
index da4bf17186..918a746d71 100644
--- a/packages/ai-providers/server-ai-vercel/src/types.ts
+++ b/packages/ai-providers/server-ai-vercel/src/types.ts
@@ -38,4 +38,3 @@ export interface VercelAISDKConfig<TMod> extends VercelAIModelParameters {
   model: TMod;
   messages?: LDMessage[] | undefined;
 }
-
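Taken together, the two patches above make the static path usable end to end: toVercelAISDK() turns an LD AI config into the exact options object that generateText() and streamText() accept, with mapParameters() handling the snake_case-to-camelCase conversion. A minimal sketch of that flow (the @ai-sdk/openai provider, the import path for this package, and the aiClient/context setup are illustrative assumptions, not something these patches pin down):

    import { openai } from '@ai-sdk/openai';
    import { generateText } from 'ai';
    // Import path assumed from the workspace directory name; use the published package name.
    import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

    // aiClient is an LD AI client and context an LD evaluation context, both assumed
    // to be initialized elsewhere; the config key and default value are illustrative.
    const aiConfig = await aiClient.config('vercel-chat', context, { enabled: false });

    // openai is callable as (modelName) => model, so it satisfies VercelAISDKProvider.
    // toVercelAISDK() resolves the model, merges messages, and maps the parameters.
    const vercelConfig = VercelProvider.toVercelAISDK(aiConfig, openai);
    const result = await generateText(vercelConfig);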
From 47a087501f899607e9f0c7f6cfd1e363f45295e4 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 4 Nov 2025 14:31:14 +0000
Subject: [PATCH 3/4] favor totalUsage over usage metrics

---
 .../server-ai-vercel/src/VercelProvider.ts | 78 +++++++++++++------
 1 file changed, 55 insertions(+), 23 deletions(-)

diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index 97f1336111..cf9b5d697c 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -113,6 +113,26 @@ export class VercelProvider extends AIProvider {
     return mapping[lowercasedName] || lowercasedName;
   }
 
+  /**
+   * Map Vercel AI SDK usage data to LaunchDarkly token usage.
+   * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @param usageData Usage data from Vercel AI SDK (may be from usage or totalUsage)
+   * @returns LDTokenUsage or undefined if no usage data provided
+   */
+  static mapUsageDataToLDTokenUsage(usageData: any): LDTokenUsage | undefined {
+    if (!usageData) {
+      return undefined;
+    }
+
+    const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } = usageData;
+    return {
+      total: totalTokens ?? 0,
+      input: inputTokens ?? promptTokens ?? 0,
+      output: outputTokens ?? completionTokens ?? 0,
+    };
+  }
+
   /**
    * Create AI metrics information from a Vercel AI response.
    * This method extracts token usage information and success status from Vercel AI responses
@@ -120,50 +140,62 @@ export class VercelProvider extends AIProvider {
    * Supports both v4 and v5 field names for backward compatibility.
    */
   static createAIMetrics(vercelResponse: any): LDAIMetrics {
-    // Extract token usage if available
-    let usage: LDTokenUsage | undefined;
-    if (vercelResponse?.usage) {
-      const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } =
-        vercelResponse.usage;
-      usage = {
-        total: totalTokens ?? 0,
-        input: inputTokens ?? promptTokens ?? 0,
-        output: outputTokens ?? completionTokens ?? 0,
-      };
+    const finishReason = vercelResponse?.finishReason ?? 'unknown';
+    let usageData: any;
+
+    // favor totalUsage over usage for cumulative usage across all steps
+    if (vercelResponse?.totalUsage) {
+      usageData = vercelResponse?.totalUsage;
+    } else if (vercelResponse?.usage) {
+      usageData = vercelResponse?.usage;
     }
 
-    // Vercel AI responses that complete successfully are considered successful
+    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
+    const success = finishReason !== 'error';
+
     return {
-      success: true,
+      success,
       usage,
     };
   }
 
   /**
-   * Create a metrics extractor for Vercel AI SDK streaming results.
+   * Create AI metrics from a Vercel AI SDK streaming result.
    * Use this with tracker.trackStreamMetricsOf() for streaming operations like streamText.
    *
-   * The extractor waits for the stream's response promise to resolve, then extracts
-   * metrics from the completed response.
+   * This method waits for the stream to complete, then extracts metrics using totalUsage
+   * (preferred for cumulative usage across all steps) or usage if totalUsage is unavailable.
    *
-   * @returns A metrics extractor function for streaming results
+   * @param stream The stream result from streamText()
+   * @returns A Promise that resolves to LDAIMetrics
    *
    * @example
    * const stream = aiConfig.tracker.trackStreamMetricsOf(
    *   () => streamText(vercelConfig),
-   *   VercelProvider.createStreamMetricsExtractor()
+   *   VercelProvider.createStreamMetrics
    * );
    *
    * for await (const chunk of stream.textStream) {
    *   process.stdout.write(chunk);
    * }
    */
-  static createStreamMetricsExtractor() {
-    return async (stream: any): Promise<LDAIMetrics> => {
-      // Wait for stream to complete
-      const result = await stream.response;
-      // Extract metrics from completed response
-      return VercelProvider.createAIMetrics(result);
+  static async createStreamMetrics(stream: any): Promise<LDAIMetrics> {
+    const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
+
+    // favor totalUsage over usage for cumulative usage across all steps
+    let usageData: any;
+    if (stream.totalUsage) {
+      usageData = await stream.totalUsage;
+    } else if (stream.usage) {
+      usageData = await stream.usage;
+    }
+
+    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
+    const success = finishReason !== 'error';
+
+    return {
+      success,
+      usage,
     };
   }
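With this patch the streaming metrics no longer hang off stream.response; they come from finishReason and the cumulative totalUsage. Wired into the tracker, the call site from the doc comment reads as below (vercelConfig as in the earlier sketch; the surrounding setup is assumed):

    import { streamText } from 'ai';

    const stream = aiConfig.tracker.trackStreamMetricsOf(
      () => streamText(vercelConfig),
      VercelProvider.createStreamMetrics, // awaits finishReason/totalUsage when the stream ends
    );

    for await (const chunk of stream.textStream) {
      process.stdout.write(chunk);
    }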
From 9772c40366594f6f913b0d390a371f793ecde701 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 4 Nov 2025 18:48:14 +0000
Subject: [PATCH 4/4] add type checking and update method names

---
 .../server-ai-vercel/src/VercelProvider.ts | 85 +++++++++++--------
 .../server-ai-vercel/src/types.ts          | 34 ++++++++
 2 files changed, 82 insertions(+), 37 deletions(-)

diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index cf9b5d697c..185d93c13c 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -11,6 +11,9 @@ import type {
 } from '@launchdarkly/server-sdk-ai';
 
 import type {
+  ModelUsageTokens,
+  StreamResponse,
+  TextResponse,
   VercelAIModelParameters,
   VercelAISDKConfig,
   VercelAISDKMapOptions,
@@ -65,21 +68,18 @@ export class VercelProvider extends AIProvider {
    * Invoke the Vercel AI model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Call Vercel AI generateText
     const result = await generateText({
       model: this._model,
       messages,
       ...this._parameters,
     });
 
-    // Create the assistant message
     const assistantMessage: LDMessage = {
       role: 'assistant',
       content: result.text,
     };
 
-    // Extract metrics including token usage and success status
-    const metrics = VercelProvider.createAIMetrics(result);
+    const metrics = VercelProvider.getAIMetricsFromResponse(result);
 
     return {
       message: assistantMessage,
@@ -115,17 +115,13 @@ export class VercelProvider extends AIProvider {
 
   /**
    * Map Vercel AI SDK usage data to LaunchDarkly token usage.
-   * Supports both v4 and v5 field names for backward compatibility.
    *
-   * @param usageData Usage data from Vercel AI SDK (may be from usage or totalUsage)
-   * @returns LDTokenUsage or undefined if no usage data provided
+   * @param usageData Usage data from Vercel AI SDK
+   * @returns LDTokenUsage
    */
-  static mapUsageDataToLDTokenUsage(usageData: any): LDTokenUsage | undefined {
-    if (!usageData) {
-      return undefined;
-    }
-
-    const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } = usageData;
+  static mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
+    // Support v4 field names (promptTokens, completionTokens) for backward compatibility
+    const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
     return {
       total: totalTokens ?? 0,
       input: inputTokens ?? promptTokens ?? 0,
@@ -134,23 +130,31 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics information from a Vercel AI response.
+   * Get AI metrics from a Vercel AI SDK text response
    * This method extracts token usage information and success status from Vercel AI responses
    * and returns a LaunchDarkly AIMetrics object.
    * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @param response The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   *
+   * @example
+   * const response = await aiConfig.tracker.trackMetricsOf(
+   *   VercelProvider.getAIMetricsFromResponse,
+   *   () => generateText(vercelConfig)
+   * );
    */
-  static createAIMetrics(vercelResponse: any): LDAIMetrics {
-    const finishReason = vercelResponse?.finishReason ?? 'unknown';
-    let usageData: any;
+  static getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
+    const finishReason = response?.finishReason ?? 'unknown';
 
     // favor totalUsage over usage for cumulative usage across all steps
-    if (vercelResponse?.totalUsage) {
-      usageData = vercelResponse?.totalUsage;
-    } else if (vercelResponse?.usage) {
-      usageData = vercelResponse?.usage;
+    let usage: LDTokenUsage | undefined;
+    if (response?.totalUsage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.totalUsage);
+    } else if (response?.usage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.usage);
     }
 
-    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     const success = finishReason !== 'error';
 
     return {
@@ -160,8 +164,21 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics from a Vercel AI SDK streaming result.
-   * Use this with tracker.trackStreamMetricsOf() for streaming operations like streamText.
+   * Create AI metrics information from a Vercel AI response.
+   * This method extracts token usage information and success status from Vercel AI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param vercelResponse The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(vercelResponse: TextResponse): LDAIMetrics {
+    return VercelProvider.getAIMetricsFromResponse(vercelResponse);
+  }
+
+  /**
+   * Get AI metrics from a Vercel AI SDK streaming result.
    *
    * This method waits for the stream to complete, then extracts metrics using totalUsage
    * (preferred for cumulative usage across all steps) or usage if totalUsage is unavailable.
@@ -172,25 +189,22 @@ export class VercelProvider extends AIProvider {
    * @example
    * const stream = aiConfig.tracker.trackStreamMetricsOf(
    *   () => streamText(vercelConfig),
-   *   VercelProvider.createStreamMetrics
+   *   VercelProvider.getAIMetricsFromStream
    * );
-   *
-   * for await (const chunk of stream.textStream) {
-   *   process.stdout.write(chunk);
-   * }
    */
-  static async createStreamMetrics(stream: any): Promise<LDAIMetrics> {
+  static async getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
     const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
 
     // favor totalUsage over usage for cumulative usage across all steps
-    let usageData: any;
+    let usage: LDTokenUsage | undefined;
     if (stream.totalUsage) {
-      usageData = await stream.totalUsage;
+      const usageData = await stream.totalUsage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     } else if (stream.usage) {
-      usageData = await stream.usage;
+      const usageData = await stream.usage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     }
 
-    const usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     const success = finishReason !== 'error';
 
     return {
@@ -223,15 +237,12 @@ export class VercelProvider extends AIProvider {
 
     const params: VercelAIModelParameters = {};
 
-    // Map token limits
     if (parameters.max_tokens !== undefined) {
       params.maxTokens = parameters.max_tokens as number;
     }
     if (parameters.max_completion_tokens !== undefined) {
       params.maxOutputTokens = parameters.max_completion_tokens as number;
     }
-
-    // Map remaining parameters
    if (parameters.temperature !== undefined) {
       params.temperature = parameters.temperature as number;
     }
diff --git a/packages/ai-providers/server-ai-vercel/src/types.ts b/packages/ai-providers/server-ai-vercel/src/types.ts
index 918a746d71..098c7f6251 100644
--- a/packages/ai-providers/server-ai-vercel/src/types.ts
+++ b/packages/ai-providers/server-ai-vercel/src/types.ts
@@ -38,3 +38,37 @@ export interface VercelAISDKConfig<TMod> extends VercelAIModelParameters {
   model: TMod;
   messages?: LDMessage[] | undefined;
 }
+
+/**
+ * Token usage information from Vercel AI SDK operations.
+ * Matches the LanguageModelUsage type from the Vercel AI SDK.
+ * Includes v4 field names (promptTokens, completionTokens) for backward compatibility.
+ */
+export interface ModelUsageTokens {
+  inputTokens?: number;
+  outputTokens?: number;
+  totalTokens?: number;
+  reasoningTokens?: number;
+  cachedInputTokens?: number;
+  // v4 backward compatibility field names
+  promptTokens?: number;
+  completionTokens?: number;
+}
+
+/**
+ * Response type for non-streaming Vercel AI SDK operations (e.g., generateText).
+ */
+export interface TextResponse {
+  finishReason?: string;
+  totalUsage?: ModelUsageTokens;
+  usage?: ModelUsageTokens;
+}
+
+/**
+ * Response type for streaming Vercel AI SDK operations (e.g., streamText).
+ */
+export interface StreamResponse {
+  finishReason?: Promise<string>;
+  totalUsage?: Promise<ModelUsageTokens>;
+  usage?: Promise<ModelUsageTokens>;
+}
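After this final patch, mapUsageDataToLDTokenUsage() is the one typed seam between the two SDKs, and its v4/v5 handling can be read straight off the ?? chains. A small illustration with plain object literals (inputs fabricated purely for demonstration):

    // v5 (AI SDK 5.x) field names map directly.
    VercelProvider.mapUsageDataToLDTokenUsage({ inputTokens: 12, outputTokens: 34, totalTokens: 46 });
    // => { total: 46, input: 12, output: 34 }

    // v4 field names fall back via inputTokens ?? promptTokens and
    // outputTokens ?? completionTokens.
    VercelProvider.mapUsageDataToLDTokenUsage({ promptTokens: 12, completionTokens: 34, totalTokens: 46 });
    // => { total: 46, input: 12, output: 34 }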