diff --git a/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts b/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
index 8d197f4ce5..a70fce0b12 100644
--- a/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
+++ b/packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts
@@ -898,3 +898,158 @@ it('tracks error', () => {
     1,
   );
 });
+
+describe('trackMetricsOf', () => {
+  it('tracks success and token usage from metrics', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: true,
+      usage: { total: 100, input: 50, output: 50 },
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    const result = await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    expect(result).toBe(mockResult);
+    expect(metricsExtractor).toHaveBeenCalledWith(mockResult);
+    expect(operation).toHaveBeenCalled();
+
+    // Should track success
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:success',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should track token usage
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:total',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      100,
+    );
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:input',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      50,
+    );
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:tokens:output',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      50,
+    );
+  });
+
+  it('tracks failure when metrics indicate failure', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: false,
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    // Should track error
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:error',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+  });
+
+  it('tracks failure when operation throws', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const error = new Error('Operation failed');
+    const metricsExtractor = jest.fn();
+    const operation = jest.fn().mockRejectedValue(error);
+
+    await expect(tracker.trackMetricsOf(metricsExtractor, operation)).rejects.toThrow(error);
+
+    // Should track error
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:error',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should not call metrics extractor when operation fails
+    expect(metricsExtractor).not.toHaveBeenCalled();
+  });
+
+  it('tracks metrics without token usage', async () => {
+    const tracker = new LDAIConfigTrackerImpl(
+      mockLdClient,
+      configKey,
+      variationKey,
+      version,
+      modelName,
+      providerName,
+      testContext,
+    );
+
+    const mockResult = { response: 'test' };
+    const mockMetrics = {
+      success: true,
+      // No usage provided
+    };
+
+    const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
+    const operation = jest.fn().mockResolvedValue(mockResult);
+
+    await tracker.trackMetricsOf(metricsExtractor, operation);
+
+    // Should track success but not token usage
+    expect(mockTrack).toHaveBeenCalledWith(
+      '$ld:ai:generation:success',
+      testContext,
+      { configKey, variationKey, version, modelName, providerName },
+      1,
+    );
+
+    // Should not track token usage
+    expect(mockTrack).not.toHaveBeenCalledWith(
+      '$ld:ai:tokens:total',
+      expect.any(Object),
+      expect.any(Object),
+      expect.any(Number),
+    );
+  });
+});
diff --git a/packages/sdk/server-ai/__tests__/TrackedChat.test.ts b/packages/sdk/server-ai/__tests__/TrackedChat.test.ts
new file mode 100644
index 0000000000..c91b2d3d57
--- /dev/null
+++ b/packages/sdk/server-ai/__tests__/TrackedChat.test.ts
@@ -0,0 +1,231 @@
+import { TrackedChat } from '../src/api/chat/TrackedChat';
+import { ChatResponse } from '../src/api/chat/types';
+import { LDAIConfig, LDMessage } from '../src/api/config/LDAIConfig';
+import { LDAIConfigTracker } from '../src/api/config/LDAIConfigTracker';
+import { AIProvider } from '../src/api/providers/AIProvider';
+
+describe('TrackedChat', () => {
+  let mockProvider: jest.Mocked<AIProvider>;
+  let mockTracker: jest.Mocked<LDAIConfigTracker>;
+  let aiConfig: LDAIConfig;
+
+  beforeEach(() => {
+    // Mock the AIProvider
+    mockProvider = {
+      invokeModel: jest.fn(),
+    } as any;
+
+    // Mock the LDAIConfigTracker
+    mockTracker = {
+      trackMetricsOf: jest.fn(),
+      trackDuration: jest.fn(),
+      trackTokens: jest.fn(),
+      trackSuccess: jest.fn(),
+      trackError: jest.fn(),
+      trackFeedback: jest.fn(),
+      trackTimeToFirstToken: jest.fn(),
+      trackDurationOf: jest.fn(),
+      trackOpenAIMetrics: jest.fn(),
+      trackBedrockConverseMetrics: jest.fn(),
+      trackVercelAIMetrics: jest.fn(),
+      getSummary: jest.fn(),
+    } as any;
+
+    // Create a basic AI config
+    aiConfig = {
+      enabled: true,
+      messages: [{ role: 'system', content: 'You are a helpful assistant.' }],
+      model: { name: 'gpt-4' },
+      provider: { name: 'openai' },
+      tracker: mockTracker,
+      toVercelAISDK: jest.fn(),
+    };
+  });
+
+  describe('appendMessages', () => {
+    it('appends messages to the conversation history', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      const messagesToAppend: LDMessage[] = [
+        { role: 'user', content: 'Hello' },
+        { role: 'assistant', content: 'Hi there!' },
+      ];
+
+      chat.appendMessages(messagesToAppend);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Hi there!' });
+    });
+
+    it('appends multiple message batches sequentially', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'First message' }]);
+      chat.appendMessages([{ role: 'assistant', content: 'Second message' }]);
+      chat.appendMessages([{ role: 'user', content: 'Third message' }]);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(3);
+      expect(messages[0].content).toBe('First message');
+      expect(messages[1].content).toBe('Second message');
+      expect(messages[2].content).toBe('Third message');
+    });
+
+    it('handles empty message array', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([]);
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(0);
+    });
+  });
+
+  describe('getMessages', () => {
+    it('returns only conversation history when includeConfigMessages is false', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([
+        { role: 'user', content: 'User message' },
+        { role: 'assistant', content: 'Assistant message' },
+      ]);
+
+      const messages = chat.getMessages(false);
+
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Assistant message' });
+    });
+
+    it('returns only conversation history when includeConfigMessages is omitted (defaults to false)', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'User message' }]);
+
+      const messages = chat.getMessages();
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
+    });
+
+    it('returns config messages prepended when includeConfigMessages is true', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([
+        { role: 'user', content: 'User message' },
+        { role: 'assistant', content: 'Assistant message' },
+      ]);
+
+      const messages = chat.getMessages(true);
+
+      expect(messages).toHaveLength(3);
+      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
+      expect(messages[1]).toEqual({ role: 'user', content: 'User message' });
+      expect(messages[2]).toEqual({ role: 'assistant', content: 'Assistant message' });
+    });
+
+    it('returns only config messages when no conversation history exists and includeConfigMessages is true', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      const messages = chat.getMessages(true);
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
+    });
+
+    it('returns empty array when no messages exist and includeConfigMessages is false', () => {
+      const configWithoutMessages: LDAIConfig = {
+        ...aiConfig,
+        messages: [],
+      };
+      const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
+
+      const messages = chat.getMessages(false);
+
+      expect(messages).toHaveLength(0);
+    });
+
+    it('returns a copy of the messages array (not a reference)', () => {
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'Original message' }]);
+
+      const messages1 = chat.getMessages();
+      const messages2 = chat.getMessages();
+
+      expect(messages1).not.toBe(messages2);
+      expect(messages1).toEqual(messages2);
+
+      // Modifying returned array should not affect internal state
+      messages1.push({ role: 'assistant', content: 'Modified' });
+
+      const messages3 = chat.getMessages();
+      expect(messages3).toHaveLength(1);
+      expect(messages3[0].content).toBe('Original message');
+    });
+
+    it('handles undefined config messages gracefully', () => {
+      const configWithoutMessages: LDAIConfig = {
+        ...aiConfig,
+        messages: undefined,
+      };
+      const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'User message' }]);
+
+      const messagesWithConfig = chat.getMessages(true);
+      expect(messagesWithConfig).toHaveLength(1);
+      expect(messagesWithConfig[0].content).toBe('User message');
+
+      const messagesWithoutConfig = chat.getMessages(false);
+      expect(messagesWithoutConfig).toHaveLength(1);
+      expect(messagesWithoutConfig[0].content).toBe('User message');
+    });
+  });
+
+  describe('integration with invoke', () => {
+    it('adds messages from invoke to history accessible via getMessages', async () => {
+      const mockResponse: ChatResponse = {
+        message: { role: 'assistant', content: 'Response from model' },
+        metrics: { success: true },
+      };
+
+      mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
+
+      mockProvider.invokeModel.mockResolvedValue(mockResponse);
+
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      await chat.invoke('Hello');
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(2);
+      expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
+      expect(messages[1]).toEqual({ role: 'assistant', content: 'Response from model' });
+    });
+
+    it('preserves appended messages when invoking', async () => {
+      const mockResponse: ChatResponse = {
+        message: { role: 'assistant', content: 'Response' },
+        metrics: { success: true },
+      };
+
+      mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
+
+      mockProvider.invokeModel.mockResolvedValue(mockResponse);
+
+      const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
+
+      chat.appendMessages([{ role: 'user', content: 'Pre-appended message' }]);
+      await chat.invoke('New user input');
+
+      const messages = chat.getMessages(false);
+      expect(messages).toHaveLength(3);
+      expect(messages[0].content).toBe('Pre-appended message');
+      expect(messages[1].content).toBe('New user input');
+      expect(messages[2].content).toBe('Response');
+    });
+  });
+});
diff --git a/packages/sdk/server-ai/src/LDAIClientImpl.ts b/packages/sdk/server-ai/src/LDAIClientImpl.ts
index da50b04399..0cb7f96c0d 100644
--- a/packages/sdk/server-ai/src/LDAIClientImpl.ts
+++ b/packages/sdk/server-ai/src/LDAIClientImpl.ts
@@ -1,8 +1,9 @@
 import * as Mustache from 'mustache';
 
-import { LDContext } from '@launchdarkly/js-server-sdk-common';
+import { LDContext, LDLogger } from '@launchdarkly/js-server-sdk-common';
 
 import { LDAIAgent, LDAIAgentConfig, LDAIAgentDefaults } from './api/agents';
+import { TrackedChat, TrackedChatFactory } from './api/chat';
 import {
   LDAIConfig,
   LDAIConfigTracker,
@@ -57,7 +58,11 @@ interface EvaluationResult {
 }
 
 export class LDAIClientImpl implements LDAIClient {
-  constructor(private _ldClient: LDClientMin) {}
+  private _logger?: LDLogger;
+
+  constructor(private _ldClient: LDClientMin) {
+    this._logger = _ldClient.logger;
+  }
 
   private _interpolateTemplate(template: string, variables: Record<string, unknown>): string {
     return Mustache.render(template, variables, undefined, { escape: (item: any) => item });
@@ -222,4 +227,25 @@ export class LDAIClientImpl implements LDAIClient {
 
     return agents;
   }
+
+  async initChat(
+    key: string,
+    context: LDContext,
+    defaultValue: LDAIDefaults,
+    variables?: Record<string, unknown>,
+  ): Promise<TrackedChat | undefined> {
+    // Track chat initialization
+    this._ldClient.track('$ld:ai:config:function:initChat', context, key, 1);
+
+    const aiConfig = await this.config(key, context, defaultValue, variables);
+
+    // Return undefined if the configuration is disabled
+    if (!aiConfig.enabled) {
+      this._logger?.info(`Chat configuration is disabled: ${key}`);
+      return undefined;
+    }
+
+    // Create the TrackedChat instance based on the provider
+    return TrackedChatFactory.create(aiConfig, aiConfig.tracker, this._logger);
+  }
 }
diff --git a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
index d820a85168..8830cee4ea 100644
--- a/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
+++ b/packages/sdk/server-ai/src/LDAIConfigTrackerImpl.ts
@@ -6,6 +6,7 @@ import {
   createBedrockTokenUsage,
   createOpenAiUsage,
   createVercelAISDKTokenUsage,
+  LDAIMetrics,
   LDFeedbackKind,
   LDTokenUsage,
 } from './api/metrics';
@@ -87,6 +88,37 @@ export class LDAIConfigTrackerImpl implements LDAIConfigTracker {
     this._ldClient.track('$ld:ai:generation:error', this._context, this._getTrackData(), 1);
   }
 
+  async trackMetricsOf<TRes>(
+    metricsExtractor: (result: TRes) => LDAIMetrics,
+    func: () => Promise<TRes>,
+  ): Promise<TRes> {
+    let result: TRes;
+
+    try {
+      result = await this.trackDurationOf(func);
+    } catch (err) {
+      this.trackError();
+      throw err;
+    }
+
+    // Extract metrics after successful AI call
+    const metrics = metricsExtractor(result);
+
+    // Track success/error based on metrics
+    if (metrics.success) {
+      this.trackSuccess();
+    } else {
+      this.trackError();
+    }
+
+    // Track token usage if available
+    if (metrics.usage) {
+      this.trackTokens(metrics.usage);
+    }
+
+    return result;
+  }
+
   async trackOpenAIMetrics<
     TRes extends {
       usage?: {
diff --git a/packages/sdk/server-ai/src/LDClientMin.ts b/packages/sdk/server-ai/src/LDClientMin.ts
index 2158c569cc..864354c0fa 100644
--- a/packages/sdk/server-ai/src/LDClientMin.ts
+++ b/packages/sdk/server-ai/src/LDClientMin.ts
@@ -1,4 +1,4 @@
-import { LDContext, LDFlagValue } from '@launchdarkly/js-server-sdk-common';
+import { LDContext, LDFlagValue, LDLogger } from '@launchdarkly/js-server-sdk-common';
 
 /**
  * Interface which represents the required interface components for a server SDK
@@ -13,4 +13,6 @@ export interface LDClientMin {
   ): Promise<LDFlagValue>;
 
   track(key: string, context: LDContext, data?: any, metricValue?: number): void;
+
+  readonly logger?: LDLogger;
 }
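A minimal sketch of how the new `trackMetricsOf` method is meant to be called from application code. `callMyModel` and its response shape are hypothetical stand-ins, and the sketch assumes `LDAIConfigTracker` and `LDAIMetrics` are re-exported from the package root:

```typescript
import { LDAIConfigTracker, LDAIMetrics } from '@launchdarkly/server-sdk-ai';

// Hypothetical model call; the response shape is invented for illustration.
interface MyModelResponse {
  ok: boolean;
  text: string;
  tokens?: { total: number; input: number; output: number };
}

async function callMyModel(prompt: string): Promise<MyModelResponse> {
  return { ok: true, text: `echo: ${prompt}`, tokens: { total: 10, input: 4, output: 6 } };
}

async function generate(tracker: LDAIConfigTracker, prompt: string): Promise<string> {
  // trackMetricsOf times the operation, then records success/error and any
  // token usage the extractor derives from the result.
  const result = await tracker.trackMetricsOf(
    (res: MyModelResponse): LDAIMetrics => ({ success: res.ok, usage: res.tokens }),
    () => callMyModel(prompt),
  );
  return result.text;
}
```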
diff --git a/packages/sdk/server-ai/src/api/LDAIClient.ts b/packages/sdk/server-ai/src/api/LDAIClient.ts
index be02e887d1..ccf3098938 100644
--- a/packages/sdk/server-ai/src/api/LDAIClient.ts
+++ b/packages/sdk/server-ai/src/api/LDAIClient.ts
@@ -1,6 +1,7 @@
 import { LDContext } from '@launchdarkly/js-server-sdk-common';
 
 import { LDAIAgent, LDAIAgentConfig, LDAIAgentDefaults } from './agents';
+import { TrackedChat } from './chat';
 import { LDAIConfig, LDAIDefaults } from './config/LDAIConfig';
 
 /**
@@ -143,4 +144,47 @@ export interface LDAIClient {
     agentConfigs: T,
     context: LDContext,
   ): Promise<Record<T[number]['key'], LDAIAgent>>;
+
+  /**
+   * Initializes and returns a new TrackedChat instance for chat interactions.
+   * This method serves as the primary entry point for creating TrackedChat instances from configuration.
+   *
+   * @param key The key identifying the AI chat configuration to use.
+   * @param context The standard LDContext used when evaluating flags.
+   * @param defaultValue A default value representing a standard AI chat config result.
+   * @param variables Dictionary of values for instruction interpolation.
+   * @returns A promise that resolves to the TrackedChat instance, or undefined if the configuration
+   *   is disabled or no provider implementation is available.
+   *
+   * @example
+   * ```
+   * const key = "customer_support_chat";
+   * const context = {...};
+   * const defaultValue = {
+   *   config: {
+   *     enabled: false,
+   *     model: { name: "gpt-4" },
+   *     messages: [
+   *       { role: "system", content: "You are a helpful customer support agent." }
+   *     ]
+   *   }
+   * };
+   * const variables = { customerName: 'John' };
+   *
+   * const chat = await client.initChat(key, context, defaultValue, variables);
+   * if (chat) {
+   *   const response = await chat.invoke("I need help with my order");
+   *   console.log(response.message.content);
+   *
+   *   // Access configuration and tracker if needed
+   *   console.log('Model:', chat.getConfig().model?.name);
+   *   chat.getTracker().trackSuccess();
+   * }
+   * ```
+   */
+  initChat(
+    key: string,
+    context: LDContext,
+    defaultValue: LDAIDefaults,
+    variables?: Record<string, unknown>,
+  ): Promise<TrackedChat | undefined>;
 }
diff --git a/packages/sdk/server-ai/src/api/chat/TrackedChat.ts b/packages/sdk/server-ai/src/api/chat/TrackedChat.ts
new file mode 100644
index 0000000000..68a9af2f03
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/chat/TrackedChat.ts
@@ -0,0 +1,100 @@
+import { LDAIConfig, LDMessage } from '../config/LDAIConfig';
+import { LDAIConfigTracker } from '../config/LDAIConfigTracker';
+import { AIProvider } from '../providers/AIProvider';
+import { ChatResponse } from './types';
+
+/**
+ * TrackedChat provides chat functionality by delegating to an AIProvider
+ * implementation. It handles conversation management and tracking, while
+ * the provider performs the actual model invocation.
+ */
+export class TrackedChat {
+  protected messages: LDMessage[];
+
+  constructor(
+    protected readonly aiConfig: LDAIConfig,
+    protected readonly tracker: LDAIConfigTracker,
+    protected readonly provider: AIProvider,
+  ) {
+    this.messages = [];
+  }
+
+  /**
+   * Invoke the chat model with a prompt string.
+   * This method handles conversation management and tracking, delegating to the provider's invokeModel method.
+   */
+  async invoke(prompt: string): Promise<ChatResponse> {
+    // Convert prompt string to LDMessage with role 'user' and add to conversation history
+    const userMessage: LDMessage = {
+      role: 'user',
+      content: prompt,
+    };
+    this.messages.push(userMessage);
+
+    // Prepend config messages to conversation history for model invocation
+    const configMessages = this.aiConfig.messages || [];
+    const allMessages = [...configMessages, ...this.messages];
+
+    // Delegate to provider-specific implementation with tracking
+    const response = await this.tracker.trackMetricsOf(
+      (result: ChatResponse) => result.metrics,
+      () => this.provider.invokeModel(allMessages),
+    );
+
+    // Add the assistant response to the conversation history
+    this.messages.push(response.message);
+
+    return response;
+  }
+
+  /**
+   * Get the underlying AI configuration used to initialize this TrackedChat.
+   */
+  getConfig(): LDAIConfig {
+    return this.aiConfig;
+  }
+
+  /**
+   * Get the underlying AI configuration tracker used to initialize this TrackedChat.
+   */
+  getTracker(): LDAIConfigTracker {
+    return this.tracker;
+  }
+
+  /**
+   * Get the underlying AI provider instance.
+   * This provides direct access to the provider for advanced use cases.
+   */
+  getProvider(): AIProvider {
+    return this.provider;
+  }
+
+  /**
+   * Append messages to the conversation history.
+   * Adds messages to the conversation history without invoking the model,
+   * which is useful for managing multi-turn conversations or injecting context.
+   *
+   * @param messages Array of messages to append to the conversation history
+   */
+  appendMessages(messages: LDMessage[]): void {
+    this.messages.push(...messages);
+  }
+
+  /**
+   * Get all messages in the conversation history.
+   *
+   * @param includeConfigMessages Whether to include the config messages from the AIConfig.
+   *   Defaults to false.
+   * @returns Array of messages. When includeConfigMessages is true, returns both config
+   *   messages and conversation history with config messages prepended. When false,
+   *   returns only the conversation history messages.
+   */
+  getMessages(includeConfigMessages: boolean = false): LDMessage[] {
+    if (includeConfigMessages) {
+      const configMessages = this.aiConfig.messages || [];
+      return [...configMessages, ...this.messages];
+    }
+    return [...this.messages];
+  }
+}
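A short usage sketch for the conversation-management surface above, assuming `chat` was obtained from `initChat` and that `TrackedChat` is re-exported from the package root:

```typescript
import { TrackedChat } from '@launchdarkly/server-sdk-ai';

async function supportThread(chat: TrackedChat): Promise<void> {
  // Seed prior context without invoking the model.
  chat.appendMessages([{ role: 'assistant', content: 'Your order shipped yesterday.' }]);

  // invoke() appends the user turn, calls the provider with the config
  // messages prepended, and appends the assistant reply to the history.
  const response = await chat.invoke('When will it arrive?');
  console.log(response.message.content);

  // Full transcript, including the config messages.
  console.log(chat.getMessages(true).length); // e.g. 4 with a single config message
}
```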
diff --git a/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts b/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts
new file mode 100644
index 0000000000..ea47625f78
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/chat/TrackedChatFactory.ts
@@ -0,0 +1,95 @@
+import { LDLogger } from '@launchdarkly/js-server-sdk-common';
+
+import { LDAIConfig } from '../config/LDAIConfig';
+import { LDAIConfigTracker } from '../config/LDAIConfigTracker';
+import { AIProvider } from '../providers/AIProvider';
+import { TrackedChat } from './TrackedChat';
+
+/**
+ * Factory for creating TrackedChat instances based on the provider configuration.
+ */
+export class TrackedChatFactory {
+  /**
+   * Create a TrackedChat instance based on the AI configuration.
+   * This method attempts to load provider-specific implementations dynamically.
+   * Returns undefined if the provider is not supported.
+   *
+   * @param aiConfig The AI configuration
+   * @param tracker The tracker for AI operations
+   * @param logger Optional logger for logging provider initialization
+   */
+  static async create(
+    aiConfig: LDAIConfig,
+    tracker: LDAIConfigTracker,
+    logger?: LDLogger,
+  ): Promise<TrackedChat | undefined> {
+    const provider = await this._createAIProvider(aiConfig, logger);
+    if (!provider) {
+      logger?.warn(
+        `Provider is not supported or failed to initialize: ${aiConfig.provider?.name ?? 'unknown'}`,
+      );
+      return undefined;
+    }
+
+    logger?.debug(`Successfully created TrackedChat for provider: ${aiConfig.provider?.name}`);
+    return new TrackedChat(aiConfig, tracker, provider);
+  }
+
+  /**
+   * Create an AIProvider instance based on the AI configuration.
+   * This method attempts to load provider-specific implementations dynamically.
+   */
+  private static async _createAIProvider(
+    aiConfig: LDAIConfig,
+    logger?: LDLogger,
+  ): Promise<AIProvider | undefined> {
+    const providerName = aiConfig.provider?.name?.toLowerCase();
+    logger?.debug(`Attempting to create AI provider: ${providerName ?? 'unknown'}`);
+    let provider: AIProvider | undefined;
+
+    // Try specific implementations for the provider
+    switch (providerName) {
+      case 'openai':
+        // TODO: Return OpenAI AIProvider implementation when available
+        provider = undefined;
+        break;
+      case 'bedrock':
+        // TODO: Return Bedrock AIProvider implementation when available
+        provider = undefined;
+        break;
+      default:
+        provider = undefined;
+    }
+
+    // If no specific implementation worked, try the multi-provider packages
+    if (!provider) {
+      provider = await this._createLangChainProvider(aiConfig, logger);
+    }
+
+    return provider;
+  }
+
+  /**
+   * Create a LangChain AIProvider instance if the LangChain provider is available.
+   */
+  private static async _createLangChainProvider(
+    aiConfig: LDAIConfig,
+    logger?: LDLogger,
+  ): Promise<AIProvider | undefined> {
+    try {
+      logger?.debug('Attempting to load LangChain provider');
+      // Try to dynamically import the LangChain provider
+      // This will work if @launchdarkly/server-sdk-ai-langchain is installed
+      // eslint-disable-next-line import/no-extraneous-dependencies, global-require
+      const { LangChainProvider } = require('@launchdarkly/server-sdk-ai-langchain');
+
+      const provider = await LangChainProvider.create(aiConfig, logger);
+      logger?.debug('Successfully created LangChain provider');
+      return provider;
+    } catch (error) {
+      // If the LangChain provider is not available or creation fails, return undefined
+      logger?.error(`Error creating LangChain provider: ${error}`);
+      return undefined;
+    }
+  }
+}
diff --git a/packages/sdk/server-ai/src/api/chat/index.ts b/packages/sdk/server-ai/src/api/chat/index.ts
new file mode 100644
index 0000000000..c95ec22e3c
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/chat/index.ts
@@ -0,0 +1,3 @@
+export * from './types';
+export * from './TrackedChat';
+export * from './TrackedChatFactory';
diff --git a/packages/sdk/server-ai/src/api/chat/types.ts b/packages/sdk/server-ai/src/api/chat/types.ts
new file mode 100644
index 0000000000..804bb21453
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/chat/types.ts
@@ -0,0 +1,17 @@
+import { LDMessage } from '../config/LDAIConfig';
+import { LDAIMetrics } from '../metrics/LDAIMetrics';
+
+/**
+ * Chat response structure.
+ */
+export interface ChatResponse {
+  /**
+   * The response message from the AI.
+   */
+  message: LDMessage;
+
+  /**
+   * Metrics information including success status and token usage.
+   */
+  metrics: LDAIMetrics;
+}
diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
index 444060ef32..7f0b3f5797 100644
--- a/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
+++ b/packages/sdk/server-ai/src/api/config/LDAIConfigTracker.ts
@@ -1,4 +1,4 @@
-import { LDFeedbackKind, LDTokenUsage } from '../metrics';
+import { LDAIMetrics, LDFeedbackKind, LDTokenUsage } from '../metrics';
 
 /**
  * Metrics which have been tracked.
@@ -87,6 +87,25 @@ export interface LDAIConfigTracker {
    */
  trackDurationOf<TRes>(func: () => Promise<TRes>): Promise<TRes>;
 
+  /**
+   * Track metrics for a generic AI operation.
+   *
+   * This function will track the duration of the operation, extract metrics using the provided
+   * metrics extractor function, and track success or error status accordingly.
+   *
+   * If the provided function throws, this method will also throw; in that case it records the
+   * duration and an error. A failed operation will not have any token usage data.
+   *
+   * @param metricsExtractor Function that extracts LDAIMetrics from the operation result
+   * @param func Function which executes the operation
+   * @returns The result of the operation
+   */
+  trackMetricsOf<TRes>(
+    metricsExtractor: (result: TRes) => LDAIMetrics,
+    func: () => Promise<TRes>,
+  ): Promise<TRes>;
+
   /**
    * Track an OpenAI operation.
    *
diff --git a/packages/sdk/server-ai/src/api/index.ts b/packages/sdk/server-ai/src/api/index.ts
index cd6333b027..cd27112f7a 100644
--- a/packages/sdk/server-ai/src/api/index.ts
+++ b/packages/sdk/server-ai/src/api/index.ts
@@ -1,4 +1,6 @@
 export * from './config';
 export * from './agents';
+export * from './chat';
 export * from './metrics';
 export * from './LDAIClient';
+export * from './providers';
diff --git a/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts b/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts
new file mode 100644
index 0000000000..3b0fb99ec7
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/metrics/LDAIMetrics.ts
@@ -0,0 +1,18 @@
+import { LDTokenUsage } from './LDTokenUsage';
+
+/**
+ * Metrics information for AI operations that includes success status and token usage.
+ * This interface combines success/failure tracking with token usage metrics.
+ */
+export interface LDAIMetrics {
+  /**
+   * Whether the AI operation was successful.
+   */
+  success: boolean;
+
+  /**
+   * Token usage information for the operation.
+   * This will be undefined if no token usage data is available.
+   */
+  usage?: LDTokenUsage;
+}
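For orientation, a sketch of how a provider implementation might assemble the `ChatResponse`/`LDAIMetrics` pair defined above; the `raw` payload shape is invented, since each vendor reports usage differently:

```typescript
import { ChatResponse } from '@launchdarkly/server-sdk-ai';

// Map a hypothetical vendor payload onto ChatResponse with LDAIMetrics.
function toChatResponse(raw: {
  text: string;
  promptTokens: number;
  completionTokens: number;
}): ChatResponse {
  return {
    message: { role: 'assistant', content: raw.text },
    metrics: {
      success: true,
      usage: {
        total: raw.promptTokens + raw.completionTokens,
        input: raw.promptTokens,
        output: raw.completionTokens,
      },
    },
  };
}
```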
diff --git a/packages/sdk/server-ai/src/api/metrics/index.ts b/packages/sdk/server-ai/src/api/metrics/index.ts
index 157fbd593c..a7026f62d8 100644
--- a/packages/sdk/server-ai/src/api/metrics/index.ts
+++ b/packages/sdk/server-ai/src/api/metrics/index.ts
@@ -1,5 +1,6 @@
 export * from './BedrockTokenUsage';
 export * from './OpenAiUsage';
 export * from './LDFeedbackKind';
+export * from './LDAIMetrics';
 export * from './LDTokenUsage';
 export * from './VercelAISDKTokenUsage';
diff --git a/packages/sdk/server-ai/src/api/providers/AIProvider.ts b/packages/sdk/server-ai/src/api/providers/AIProvider.ts
new file mode 100644
index 0000000000..8f6475ef5e
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/providers/AIProvider.ts
@@ -0,0 +1,43 @@
+import { LDLogger } from '@launchdarkly/js-server-sdk-common';
+
+import { ChatResponse } from '../chat/types';
+import { LDAIConfig, LDMessage } from '../config/LDAIConfig';
+
+/**
+ * Abstract base class for AI providers that implement chat model functionality.
+ * This class provides the contract that all provider implementations must follow
+ * to integrate with LaunchDarkly's tracking and configuration capabilities.
+ *
+ * Following the AICHAT spec recommendation to use base classes with non-abstract methods
+ * for better extensibility and backwards compatibility.
+ */
+export abstract class AIProvider {
+  protected readonly logger?: LDLogger;
+
+  constructor(logger?: LDLogger) {
+    this.logger = logger;
+  }
+  /**
+   * Invoke the chat model with an array of messages.
+   * This method should convert messages to provider format, invoke the model,
+   * and return a ChatResponse with the result and metrics.
+   *
+   * @param messages Array of LDMessage objects representing the conversation
+   * @returns Promise that resolves to a ChatResponse containing the model's response
+   */
+  abstract invokeModel(messages: LDMessage[]): Promise<ChatResponse>;
+
+  /**
+   * Static method that constructs an instance of the provider.
+   * Each provider implementation must provide its own static create method
+   * that accepts an AIConfig and returns a configured instance.
+   *
+   * @param aiConfig The LaunchDarkly AI configuration
+   * @param logger Optional logger for the provider
+   * @returns Promise that resolves to a configured provider instance
+   */
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise<AIProvider> {
+    throw new Error('Provider implementations must override the static create method');
+  }
+}
diff --git a/packages/sdk/server-ai/src/api/providers/index.ts b/packages/sdk/server-ai/src/api/providers/index.ts
new file mode 100644
index 0000000000..3b4d3f90a4
--- /dev/null
+++ b/packages/sdk/server-ai/src/api/providers/index.ts
@@ -0,0 +1 @@
+export * from './AIProvider';
diff --git a/packages/sdk/server-ai/tsconfig.eslint.json b/packages/sdk/server-ai/tsconfig.eslint.json
index 56c9b38305..156dde8255 100644
--- a/packages/sdk/server-ai/tsconfig.eslint.json
+++ b/packages/sdk/server-ai/tsconfig.eslint.json
@@ -1,5 +1,5 @@
 {
   "extends": "./tsconfig.json",
-  "include": ["/**/*.ts"],
-  "exclude": ["node_modules"]
+  "include": ["**/*.ts"],
+  "exclude": ["node_modules", "dist"]
 }
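Finally, an end-to-end sketch of the extension point this PR introduces: a custom `AIProvider` subclass wired into a `TrackedChat` directly, which can be useful in tests until the OpenAI/Bedrock providers land. `EchoProvider` is invented for illustration, and the imports assume these types are re-exported from the package root:

```typescript
import { AIProvider, ChatResponse, LDMessage, TrackedChat } from '@launchdarkly/server-sdk-ai';

// A fake provider that echoes the last message; no real model is called.
class EchoProvider extends AIProvider {
  async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
    const last = messages[messages.length - 1];
    return {
      message: { role: 'assistant', content: `echo: ${last?.content ?? ''}` },
      metrics: { success: true },
    };
  }
}

// Given an aiConfig from client.config(...), a chat can be constructed directly:
// const chat = new TrackedChat(aiConfig, aiConfig.tracker, new EchoProvider());
```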