245 changes: 219 additions & 26 deletions packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -10,30 +10,53 @@ import type {
LDTokenUsage,
} from '@launchdarkly/server-sdk-ai';

import type {
ModelUsageTokens,
StreamResponse,
TextResponse,
VercelAIModelParameters,
VercelAISDKConfig,
VercelAISDKMapOptions,
VercelAISDKProvider,
} from './types';

/**
* Vercel AI implementation of AIProvider.
* This provider integrates the Vercel AI SDK with LaunchDarkly's tracking capabilities.
*/
export class VercelProvider extends AIProvider {
private _model: LanguageModel;
private _parameters: VercelAIModelParameters;

/**
* Constructor for the VercelProvider.
* @param model - The Vercel AI model to use.
* @param parameters - The Vercel AI model parameters.
* @param logger - The logger to use for the Vercel AI provider.
*/
constructor(model: LanguageModel, parameters: VercelAIModelParameters, logger?: LDLogger) {
super(logger);
this._model = model;
this._parameters = parameters;
}

// =============================================================================
// MAIN FACTORY METHODS
// =============================================================================

/**
* Static factory method to create a Vercel AIProvider from an AI configuration.
* This method auto-detects the provider and creates the model.
* Note: Messages from the AI config are not included in the provider; messages
* should be passed at invocation time via invokeModel().
*
* @param aiConfig The LaunchDarkly AI configuration
* @param logger Optional logger
* @returns A Promise that resolves to a configured VercelProvider
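*
* @example
* // A minimal sketch; `aiConfig` is assumed to be an LDAIConfig already
* // retrieved via the LaunchDarkly AI SDK (names here are illustrative):
* const provider = await VercelProvider.create(aiConfig);
* const response = await provider.invokeModel([{ role: 'user', content: 'Hi' }]);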
*/
static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<VercelProvider> {
const model = await VercelProvider.createVercelModel(aiConfig);
const parameters = VercelProvider.mapParameters(aiConfig.model?.parameters);
return new VercelProvider(model, parameters, logger);
}

@@ -45,23 +68,18 @@ export class VercelProvider extends AIProvider {
* Invoke the Vercel AI model with an array of messages.
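*
* @example
* // Illustrative usage; assumes `provider` was built via VercelProvider.create():
* const response = await provider.invokeModel([{ role: 'user', content: 'Hello!' }]);
* console.log(response.message.content);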
*/
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
const result = await generateText({
model: this._model,
messages,
...this._parameters,
});

const assistantMessage: LDMessage = {
role: 'assistant',
content: result.text,
};

const metrics = VercelProvider.getAIMetricsFromResponse(result);

return {
message: assistantMessage,
@@ -95,45 +113,220 @@
return mapping[lowercasedName] || lowercasedName;
}

/**
* Map Vercel AI SDK usage data to LaunchDarkly token usage.
*
* @param usageData Usage data from Vercel AI SDK
* @returns LDTokenUsage
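*
* @example
* // v5 and v4 usage shapes map to the same LDTokenUsage (values illustrative):
* VercelProvider.mapUsageDataToLDTokenUsage({ totalTokens: 10, inputTokens: 4, outputTokens: 6 });
* VercelProvider.mapUsageDataToLDTokenUsage({ totalTokens: 10, promptTokens: 4, completionTokens: 6 });
* // => { total: 10, input: 4, output: 6 }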
*/
static mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
// Support v4 field names (promptTokens, completionTokens) for backward compatibility
const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
return {
total: totalTokens ?? 0,
input: inputTokens ?? promptTokens ?? 0,
output: outputTokens ?? completionTokens ?? 0,
};
}

/**
* Get AI metrics from a Vercel AI SDK text response.
* This method extracts token usage information and success status from Vercel AI responses
* and returns a LaunchDarkly AIMetrics object.
* Supports both v4 and v5 field names for backward compatibility.
*
* @param response The response from generateText() or similar non-streaming operations
* @returns LDAIMetrics with success status and token usage
*
* @example
* const response = await aiConfig.tracker.trackMetricsOf(
* VercelProvider.getAIMetricsFromResponse,
* () => generateText(vercelConfig)
* );
*/
static getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
const finishReason = response?.finishReason ?? 'unknown';

// favor totalUsage over usage for cumulative usage across all steps
let usage: LDTokenUsage | undefined;
if (response?.totalUsage) {
usage = VercelProvider.mapUsageDataToLDTokenUsage(response.totalUsage);
} else if (response?.usage) {
usage = VercelProvider.mapUsageDataToLDTokenUsage(response.usage);
}

const success = finishReason !== 'error';

return {
success,
usage,
};
}

/**
* Create AI metrics information from a Vercel AI response.
* This method extracts token usage information and success status from Vercel AI responses
* and returns a LaunchDarkly AIMetrics object.
* Supports both v4 and v5 field names for backward compatibility.
*
* @deprecated Use `getAIMetricsFromResponse()` instead.
* @param vercelResponse The response from generateText() or similar non-streaming operations
* @returns LDAIMetrics with success status and token usage
*/
static createAIMetrics(vercelResponse: TextResponse): LDAIMetrics {
return VercelProvider.getAIMetricsFromResponse(vercelResponse);
}

/**
* Get AI metrics from a Vercel AI SDK streaming result.
*
* This method waits for the stream to complete, then extracts metrics using totalUsage
* (preferred for cumulative usage across all steps) or usage if totalUsage is unavailable.
*
* @param stream The stream result from streamText()
* @returns A Promise that resolves to LDAIMetrics
*
* @example
* const stream = aiConfig.tracker.trackStreamMetricsOf(
* () => streamText(vercelConfig),
* VercelProvider.getAIMetricsFromStream
* );
*/
static async getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';

// favor totalUsage over usage for cumulative usage across all steps
let usage: LDTokenUsage | undefined;
if (stream.totalUsage) {
const usageData = await stream.totalUsage;
usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
} else if (stream.usage) {
const usageData = await stream.usage;
usage = VercelProvider.mapUsageDataToLDTokenUsage(usageData);
}

const success = finishReason !== 'error';

return {
success,
usage,
};
}

/**
* Map LaunchDarkly model parameters to Vercel AI SDK parameters.
*
* Parameter mappings:
* - max_tokens → maxTokens
* - max_completion_tokens → maxOutputTokens
* - temperature → temperature
* - top_p → topP
* - top_k → topK
* - presence_penalty → presencePenalty
* - frequency_penalty → frequencyPenalty
* - stop → stopSequences
* - seed → seed
*
* @param parameters The LaunchDarkly model parameters to map
* @returns An object containing mapped Vercel AI SDK parameters
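*
* @example
* // Illustrative input/output pair:
* VercelProvider.mapParameters({ max_tokens: 256, top_p: 0.9, stop: ['\n'] });
* // => { maxTokens: 256, topP: 0.9, stopSequences: ['\n'] }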
*/
static mapParameters(parameters?: { [index: string]: unknown }): VercelAIModelParameters {
if (!parameters) {
return {};
}

const params: VercelAIModelParameters = {};

if (parameters.max_tokens !== undefined) {
params.maxTokens = parameters.max_tokens as number;
}
if (parameters.max_completion_tokens !== undefined) {
params.maxOutputTokens = parameters.max_completion_tokens as number;
}
if (parameters.temperature !== undefined) {
params.temperature = parameters.temperature as number;
}
if (parameters.top_p !== undefined) {
params.topP = parameters.top_p as number;
}
if (parameters.top_k !== undefined) {
params.topK = parameters.top_k as number;
}
if (parameters.presence_penalty !== undefined) {
params.presencePenalty = parameters.presence_penalty as number;
}
if (parameters.frequency_penalty !== undefined) {
params.frequencyPenalty = parameters.frequency_penalty as number;
}
if (parameters.stop !== undefined) {
params.stopSequences = parameters.stop as string[];
}
if (parameters.seed !== undefined) {
params.seed = parameters.seed as number;
}

return params;
}

/**
* Convert an AI configuration to Vercel AI SDK parameters.
* This static method allows converting an LDAIConfig to VercelAISDKConfig without
* requiring an instance of VercelProvider.
*
* @param aiConfig The LaunchDarkly AI configuration
* @param provider A Vercel AI SDK Provider or a map of provider names to Vercel AI SDK Providers
* @param options Optional mapping options
* @returns A configuration directly usable in Vercel AI SDK generateText() and streamText()
* @throws {Error} if a Vercel AI SDK model cannot be determined from the given provider parameter
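*
* @example
* // Sketch assuming @ai-sdk/openai is installed and `aiConfig` carries messages:
* import { openai } from '@ai-sdk/openai';
* const config = VercelProvider.toVercelAISDK(aiConfig, openai);
* const result = await generateText(config);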
*/
static toVercelAISDK<TMod>(
aiConfig: LDAIConfig,
provider: VercelAISDKProvider<TMod> | Record<string, VercelAISDKProvider<TMod>>,
options?: VercelAISDKMapOptions | undefined,
): VercelAISDKConfig<TMod> {
// Determine the model from the provider
let model: TMod | undefined;
if (typeof provider === 'function') {
model = provider(aiConfig.model?.name ?? '');
} else {
model = provider[aiConfig.provider?.name ?? '']?.(aiConfig.model?.name ?? '');
}
if (!model) {
throw new Error(
'Vercel AI SDK model cannot be determined from the supplied provider parameter.',
);
}

// Merge messages from config and options
let messages: LDMessage[] | undefined;
const configMessages = ('messages' in aiConfig ? aiConfig.messages : undefined) as
| LDMessage[]
| undefined;
if (configMessages || options?.nonInterpolatedMessages) {
messages = [...(configMessages ?? []), ...(options?.nonInterpolatedMessages ?? [])];
}

// Map parameters using the shared mapping method
const params = VercelProvider.mapParameters(aiConfig.model?.parameters);

// Build and return the Vercel AI SDK configuration
return {
model,
messages,
...params,
};
}

/**
* Create a Vercel AI model from an AI configuration.
* This method creates a Vercel AI model based on the provider configuration.
* This method auto-detects the provider and creates the model instance.
*
* @param aiConfig The LaunchDarkly AI configuration
* @returns A Promise that resolves to a configured Vercel AI model
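*
* @example
* // Illustrative; the provider and model names come from the AI config itself:
* const model = await VercelProvider.createVercelModel(aiConfig);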
*/
static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
const modelName = aiConfig.model?.name || '';
// Parameters are not used in model creation but kept for future use
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const parameters = aiConfig.model?.parameters || {};

// Map provider names to their corresponding Vercel AI SDK imports
switch (providerName) {
6 changes: 6 additions & 0 deletions packages/ai-providers/server-ai-vercel/src/index.ts
@@ -1 +1,7 @@
export { VercelProvider } from './VercelProvider';
export type {
VercelAIModelParameters,
VercelAISDKConfig,
VercelAISDKMapOptions,
VercelAISDKProvider,
} from './types';