diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index fc13ae701..185d93c13 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -10,30 +10,53 @@ import type {
   LDTokenUsage,
 } from '@launchdarkly/server-sdk-ai';
 
+import type {
+  ModelUsageTokens,
+  StreamResponse,
+  TextResponse,
+  VercelAIModelParameters,
+  VercelAISDKConfig,
+  VercelAISDKMapOptions,
+  VercelAISDKProvider,
+} from './types';
+
 /**
  * Vercel AI implementation of AIProvider.
  * This provider integrates Vercel AI SDK with LaunchDarkly's tracking capabilities.
  */
 export class VercelProvider extends AIProvider {
   private _model: LanguageModel;
 
-  private _parameters: Record<string, unknown>;
+  private _parameters: VercelAIModelParameters;
 
-  constructor(model: LanguageModel, parameters: Record<string, unknown>, logger?: LDLogger) {
+  /**
+   * Constructor for the VercelProvider.
+   * @param model - The Vercel AI model to use.
+   * @param parameters - The Vercel AI model parameters.
+   * @param logger - The logger to use for the Vercel AI provider.
+   */
+  constructor(model: LanguageModel, parameters: VercelAIModelParameters, logger?: LDLogger) {
     super(logger);
     this._model = model;
     this._parameters = parameters;
   }
 
   // =============================================================================
-  // MAIN FACTORY METHOD
+  // MAIN FACTORY METHODS
   // =============================================================================
 
   /**
    * Static factory method to create a Vercel AIProvider from an AI configuration.
+   * This method auto-detects the provider and creates the model.
+   * Note: messages from the AI config are not included in the provider; messages
+   * should be passed at invocation time via invokeModel().
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @param logger Optional logger
+   * @returns A Promise that resolves to a configured VercelProvider
    */
   static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<VercelProvider> {
     const model = await VercelProvider.createVercelModel(aiConfig);
-    const parameters = aiConfig.model?.parameters || {};
+    const parameters = VercelProvider.mapParameters(aiConfig.model?.parameters);
     return new VercelProvider(model, parameters, logger);
   }
 
@@ -45,23 +68,18 @@ export class VercelProvider extends AIProvider {
    * Invoke the Vercel AI model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Call Vercel AI generateText
-    // Type assertion: our MinLanguageModel is compatible with the expected LanguageModel interface
-    // The generateText function will work with any object that has the required properties
     const result = await generateText({
       model: this._model,
       messages,
       ...this._parameters,
     });
 
-    // Create the assistant message
     const assistantMessage: LDMessage = {
       role: 'assistant',
       content: result.text,
     };
 
-    // Extract metrics including token usage and success status
-    const metrics = VercelProvider.createAIMetrics(result);
+    const metrics = VercelProvider.getAIMetricsFromResponse(result);
 
     return {
       message: assistantMessage,
@@ -95,35 +113,213 @@ export class VercelProvider extends AIProvider {
     return mapping[lowercasedName] || lowercasedName;
   }
 
+  /**
+   * Map Vercel AI SDK usage data to LaunchDarkly token usage.
+   *
+   * @param usageData Usage data from Vercel AI SDK
+   * @returns LDTokenUsage
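+   *
+   * @example
+   * // Illustrative call with a v5-style usage object:
+   * const usage = VercelProvider.mapUsageDataToLDTokenUsage({
+   *   inputTokens: 60,
+   *   outputTokens: 40,
+   *   totalTokens: 100,
+   * });
+   * // => { total: 100, input: 60, output: 40 }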
+   */
+  static mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
+    // Support v4 field names (promptTokens, completionTokens) for backward compatibility
+    const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
+    return {
+      total: totalTokens ?? 0,
+      input: inputTokens ?? promptTokens ?? 0,
+      output: outputTokens ?? completionTokens ?? 0,
+    };
+  }
+
+  /**
+   * Get AI metrics from a Vercel AI SDK text response.
+   * This method extracts token usage information and success status from Vercel AI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @param response The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   *
+   * @example
+   * const response = await aiConfig.tracker.trackMetricsOf(
+   *   VercelProvider.getAIMetricsFromResponse,
+   *   () => generateText(vercelConfig)
+   * );
+   */
+  static getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
+    const finishReason = response?.finishReason ?? 'unknown';
+
+    // Favor totalUsage over usage for cumulative usage across all steps
+    let usage: LDTokenUsage | undefined;
+    if (response?.totalUsage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.totalUsage);
+    } else if (response?.usage) {
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(response.usage);
+    }
+
+    const success = finishReason !== 'error';
+
+    return {
+      success,
+      usage,
+    };
+  }
+
   /**
    * Create AI metrics information from a Vercel AI response.
    * This method extracts token usage information and success status from Vercel AI responses
    * and returns a LaunchDarkly AIMetrics object.
    * Supports both v4 and v5 field names for backward compatibility.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param vercelResponse The response from generateText() or similar non-streaming operations
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(vercelResponse: TextResponse): LDAIMetrics {
+    return VercelProvider.getAIMetricsFromResponse(vercelResponse);
+  }
+
+  /**
+   * Get AI metrics from a Vercel AI SDK streaming result.
+   *
+   * This method waits for the stream to complete, then extracts metrics using totalUsage
+   * (preferred for cumulative usage across all steps) or usage if totalUsage is unavailable.
+   *
+   * @param stream The stream result from streamText()
+   * @returns A Promise that resolves to LDAIMetrics
+   *
+   * @example
+   * const stream = aiConfig.tracker.trackStreamMetricsOf(
+   *   () => streamText(vercelConfig),
+   *   VercelProvider.getAIMetricsFromStream
+   * );
    */
-  static createAIMetrics(vercelResponse: any): LDAIMetrics {
-    // Extract token usage if available
+  static async getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
+    const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
+
+    // Favor totalUsage over usage for cumulative usage across all steps
     let usage: LDTokenUsage | undefined;
-    if (vercelResponse?.usage) {
-      const { totalTokens, inputTokens, promptTokens, outputTokens, completionTokens } =
-        vercelResponse.usage;
-      usage = {
-        total: totalTokens ?? 0,
-        input: inputTokens ?? promptTokens ?? 0,
-        output: outputTokens ?? completionTokens ?? 0,
-      };
+    if (stream.totalUsage) {
+      const usageData = await stream.totalUsage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
+    } else if (stream.usage) {
+      const usageData = await stream.usage;
+      usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
     }
 
-    // Vercel AI responses that complete successfully are considered successful
+    const success = finishReason !== 'error';
+
     return {
-      success: true,
+      success,
       usage,
     };
   }
 
+  /**
+   * Map LaunchDarkly model parameters to Vercel AI SDK parameters.
+   *
+   * Parameter mappings:
+   * - max_tokens → maxTokens
+   * - max_completion_tokens → maxOutputTokens
+   * - temperature → temperature
+   * - top_p → topP
+   * - top_k → topK
+   * - presence_penalty → presencePenalty
+   * - frequency_penalty → frequencyPenalty
+   * - stop → stopSequences
+   * - seed → seed
+   *
+   * @param parameters The LaunchDarkly model parameters to map
+   * @returns An object containing mapped Vercel AI SDK parameters
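+   *
+   * @example
+   * // Illustrative input and result, using the mappings listed above:
+   * const params = VercelProvider.mapParameters({
+   *   max_tokens: 1024,
+   *   temperature: 0.7,
+   *   stop: ['###'],
+   * });
+   * // => { maxTokens: 1024, temperature: 0.7, stopSequences: ['###'] }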
+   */
+  static mapParameters(parameters?: { [index: string]: unknown }): VercelAIModelParameters {
+    if (!parameters) {
+      return {};
+    }
+
+    const params: VercelAIModelParameters = {};
+
+    if (parameters.max_tokens !== undefined) {
+      params.maxTokens = parameters.max_tokens as number;
+    }
+    if (parameters.max_completion_tokens !== undefined) {
+      params.maxOutputTokens = parameters.max_completion_tokens as number;
+    }
+    if (parameters.temperature !== undefined) {
+      params.temperature = parameters.temperature as number;
+    }
+    if (parameters.top_p !== undefined) {
+      params.topP = parameters.top_p as number;
+    }
+    if (parameters.top_k !== undefined) {
+      params.topK = parameters.top_k as number;
+    }
+    if (parameters.presence_penalty !== undefined) {
+      params.presencePenalty = parameters.presence_penalty as number;
+    }
+    if (parameters.frequency_penalty !== undefined) {
+      params.frequencyPenalty = parameters.frequency_penalty as number;
+    }
+    if (parameters.stop !== undefined) {
+      params.stopSequences = parameters.stop as string[];
+    }
+    if (parameters.seed !== undefined) {
+      params.seed = parameters.seed as number;
+    }
+
+    return params;
+  }
+
+  /**
+   * Convert an AI configuration to a Vercel AI SDK configuration.
+   * This static method allows converting an LDAIConfig to VercelAISDKConfig without
+   * requiring an instance of VercelProvider.
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @param provider A Vercel AI SDK Provider or a map of provider names to Vercel AI SDK Providers
+   * @param options Optional mapping options
+   * @returns A configuration directly usable in Vercel AI SDK generateText() and streamText()
+   * @throws {Error} if a Vercel AI SDK model cannot be determined from the given provider parameter
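+   *
+   * @example
+   * // Illustrative; assumes the OpenAI provider from @ai-sdk/openai:
+   * import { openai } from '@ai-sdk/openai';
+   *
+   * const vercelConfig = VercelProvider.toVercelAISDK(aiConfig, openai);
+   * const result = await generateText(vercelConfig);
+   *
+   * // Or keyed by the provider name carried in the AI config:
+   * const keyedConfig = VercelProvider.toVercelAISDK(aiConfig, { openai });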
+   */
+  static toVercelAISDK<TMod>(
+    aiConfig: LDAIConfig,
+    provider: VercelAISDKProvider<TMod> | Record<string, VercelAISDKProvider<TMod>>,
+    options?: VercelAISDKMapOptions | undefined,
+  ): VercelAISDKConfig<TMod> {
+    // Determine the model from the provider
+    let model: TMod | undefined;
+    if (typeof provider === 'function') {
+      model = provider(aiConfig.model?.name ?? '');
+    } else {
+      model = provider[aiConfig.provider?.name ?? '']?.(aiConfig.model?.name ?? '');
+    }
+    if (!model) {
+      throw new Error(
+        'Vercel AI SDK model cannot be determined from the supplied provider parameter.',
+      );
+    }
+
+    // Merge messages from config and options
+    let messages: LDMessage[] | undefined;
+    const configMessages = ('messages' in aiConfig ? aiConfig.messages : undefined) as
+      | LDMessage[]
+      | undefined;
+    if (configMessages || options?.nonInterpolatedMessages) {
+      messages = [...(configMessages ?? []), ...(options?.nonInterpolatedMessages ?? [])];
+    }
+
+    // Map parameters using the shared mapping method
+    const params = VercelProvider.mapParameters(aiConfig.model?.parameters);
+
+    // Build and return the Vercel AI SDK configuration
+    return {
+      model,
+      messages,
+      ...params,
+    };
+  }
+
   /**
    * Create a Vercel AI model from an AI configuration.
-   * This method creates a Vercel AI model based on the provider configuration.
+   * This method auto-detects the provider and creates the model instance.
    *
    * @param aiConfig The LaunchDarkly AI configuration
    * @returns A Promise that resolves to a configured Vercel AI model
@@ -131,9 +327,6 @@ export class VercelProvider extends AIProvider {
   static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
     const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
     const modelName = aiConfig.model?.name || '';
-    // Parameters are not used in model creation but kept for future use
-    // eslint-disable-next-line @typescript-eslint/no-unused-vars
-    const parameters = aiConfig.model?.parameters || {};
 
     // Map provider names to their corresponding Vercel AI SDK imports
     switch (providerName) {
diff --git a/packages/ai-providers/server-ai-vercel/src/index.ts b/packages/ai-providers/server-ai-vercel/src/index.ts
index 3dde0dc68..6e7eb5502 100644
--- a/packages/ai-providers/server-ai-vercel/src/index.ts
+++ b/packages/ai-providers/server-ai-vercel/src/index.ts
@@ -1 +1,7 @@
 export { VercelProvider } from './VercelProvider';
+export type {
+  VercelAIModelParameters,
+  VercelAISDKConfig,
+  VercelAISDKMapOptions,
+  VercelAISDKProvider,
+} from './types';
diff --git a/packages/ai-providers/server-ai-vercel/src/types.ts b/packages/ai-providers/server-ai-vercel/src/types.ts
new file mode 100644
index 000000000..098c7f625
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/types.ts
@@ -0,0 +1,74 @@
+import type { LDMessage } from '@launchdarkly/server-sdk-ai';
+
+/**
+ * Vercel AI SDK provider type: a function that takes a model name and returns a model instance.
+ */
+export type VercelAISDKProvider<TMod> = (modelName: string) => TMod;
+
+/**
+ * Options for mapping to Vercel AI SDK configuration.
+ */
+export interface VercelAISDKMapOptions {
+  /**
+   * Additional messages that should not be interpolated.
+   */
+  nonInterpolatedMessages?: LDMessage[] | undefined;
+}
+
+/**
+ * Vercel AI SDK model parameters.
+ * These are the parameters that can be passed to Vercel AI SDK methods like generateText() and streamText().
+ */
+export interface VercelAIModelParameters {
+  maxTokens?: number | undefined;
+  maxOutputTokens?: number | undefined;
+  temperature?: number | undefined;
+  topP?: number | undefined;
+  topK?: number | undefined;
+  presencePenalty?: number | undefined;
+  frequencyPenalty?: number | undefined;
+  stopSequences?: string[] | undefined;
+  seed?: number | undefined;
+}
+
+/**
+ * Configuration format compatible with Vercel AI SDK's generateText() and streamText() methods.
+ */
+export interface VercelAISDKConfig<TMod> extends VercelAIModelParameters {
+  model: TMod;
+  messages?: LDMessage[] | undefined;
+}
+
+/**
+ * Token usage information from Vercel AI SDK operations.
+ * Matches the LanguageModelUsage type from the Vercel AI SDK.
+ * Includes v4 field names (promptTokens, completionTokens) for backward compatibility.
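+ *
+ * @example
+ * // v5-style usage (current field names):
+ * const v5: ModelUsageTokens = { inputTokens: 60, outputTokens: 40, totalTokens: 100 };
+ * // v4-style usage (legacy field names):
+ * const v4: ModelUsageTokens = { promptTokens: 60, completionTokens: 40, totalTokens: 100 };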
+ */
+export interface ModelUsageTokens {
+  inputTokens?: number;
+  outputTokens?: number;
+  totalTokens?: number;
+  reasoningTokens?: number;
+  cachedInputTokens?: number;
+  // v4 backward compatibility field names
+  promptTokens?: number;
+  completionTokens?: number;
+}
+
+/**
+ * Response type for non-streaming Vercel AI SDK operations (e.g., generateText).
+ */
+export interface TextResponse {
+  finishReason?: string;
+  totalUsage?: ModelUsageTokens;
+  usage?: ModelUsageTokens;
+}
+
+/**
+ * Response type for streaming Vercel AI SDK operations (e.g., streamText).
+ */
+export interface StreamResponse {
+  finishReason?: Promise<string>;
+  totalUsage?: Promise<ModelUsageTokens>;
+  usage?: Promise<ModelUsageTokens>;
+}
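
Usage sketch (illustrative, not part of the patch): how the new helpers compose for a
streaming call. Assumes the package is published as @launchdarkly/server-sdk-ai-vercel,
that `aiConfig` was obtained from the LaunchDarkly AI SDK, and that the OpenAI provider
from @ai-sdk/openai is installed; adjust names to your setup.

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

// Map the LaunchDarkly AI config (model name, parameters, messages) to a
// config object accepted by the Vercel AI SDK.
const vercelConfig = VercelProvider.toVercelAISDK(aiConfig, openai);

// Stream the completion while reporting success/failure and token usage
// back to LaunchDarkly via the config's tracker.
const stream = aiConfig.tracker.trackStreamMetricsOf(
  () => streamText(vercelConfig),
  VercelProvider.getAIMetricsFromStream,
);

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}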