2 changes: 1 addition & 1 deletion packages/ai-providers/server-ai-langchain/README.md
@@ -94,7 +94,7 @@ const allMessages = [...LangChainProvider.convertMessagesToLangChain(configMessa
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => LangChainProvider.createAIMetrics(result),
+  LangChainProvider.getAIMetricsFromResponse,
   () => llm.invoke(allMessages)
 );
 
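With this rename the extractor can be passed to `trackMetricsOf` by reference, which is the point of the README change above. A minimal sketch of the full pattern, assuming `aiConfig` was retrieved from the LaunchDarkly AI SDK and a LangChain chat model is configured (the import specifiers and setup are assumptions, not part of this diff):

```typescript
import { ChatOpenAI } from '@langchain/openai';
import { HumanMessage } from '@langchain/core/messages';
// Package name assumed from this repo's layout; verify against package.json.
import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

// Assumed setup: an AI Config obtained from the LaunchDarkly AI SDK.
declare const aiConfig: { tracker: any };

const llm = new ChatOpenAI({ model: 'gpt-4o-mini' });

export async function askTracked(question: string): Promise<string> {
  // The method reference works because the tracker passes the model's
  // response straight into the metrics extractor.
  const response = await aiConfig.tracker.trackMetricsOf(
    LangChainProvider.getAIMetricsFromResponse,
    () => llm.invoke([new HumanMessage(question)]),
  );
  return typeof response.content === 'string' ? response.content : '';
}
```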
@@ -73,7 +73,7 @@ describe('LangChainProvider', () => {
     });
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = new AIMessage('Test response');
       mockResponse.response_metadata = {
@@ -84,7 +84,7 @@ describe('LangChainProvider', () => {
         },
       };
 
-      const result = LangChainProvider.createAIMetrics(mockResponse);
+      const result = LangChainProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -99,7 +99,7 @@ describe('LangChainProvider', () => {
    it('creates metrics with success=true and no usage when metadata is missing', () => {
      const mockResponse = new AIMessage('Test response');
 
-      const result = LangChainProvider.createAIMetrics(mockResponse);
+      const result = LangChainProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -51,7 +51,7 @@ export class LangChainProvider extends AIProvider {
     const response: AIMessage = await this._llm.invoke(langchainMessages);
 
     // Generate metrics early (assumes success by default)
-    const metrics = LangChainProvider.createAIMetrics(response);
+    const metrics = LangChainProvider.getAIMetricsFromResponse(response);
 
     // Extract text content from the response
     let content: string = '';
@@ -106,25 +106,27 @@ export class LangChainProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics information from a LangChain provider response.
+   * Get AI metrics from a LangChain provider response.
    * This method extracts token usage information and success status from LangChain responses
    * and returns a LaunchDarkly AIMetrics object.
    *
-   * @param langChainResponse The response from the LangChain model
+   * @param response The response from the LangChain model
    * @returns LDAIMetrics with success status and token usage
    *
    * @example
    * ```typescript
    * // Use with tracker.trackMetricsOf for automatic tracking
    * const response = await tracker.trackMetricsOf(
-   *   (result: AIMessage) => LangChainProvider.createAIMetrics(result),
+   *   LangChainProvider.getAIMetricsFromResponse,
    *   () => llm.invoke(messages)
    * );
    * ```
    */
-  static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
+  static getAIMetricsFromResponse(response: AIMessage): LDAIMetrics {
    // Extract token usage if available
    let usage: LDTokenUsage | undefined;
-    if (langChainResponse?.response_metadata?.tokenUsage) {
-      const { tokenUsage } = langChainResponse.response_metadata;
+    if (response?.response_metadata?.tokenUsage) {
+      const { tokenUsage } = response.response_metadata;
      usage = {
        total: tokenUsage.totalTokens || 0,
        input: tokenUsage.promptTokens || 0,
@@ -139,6 +141,19 @@ export class LangChainProvider extends AIProvider {
     };
   }
 
+  /**
+   * Create AI metrics information from a LangChain provider response.
+   * This method extracts token usage information and success status from LangChain responses
+   * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param langChainResponse The response from the LangChain model
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
+    return LangChainProvider.getAIMetricsFromResponse(langChainResponse);
+  }
+
   /**
    * Convert LaunchDarkly messages to LangChain messages.
    * This helper method enables developers to work directly with LangChain message types
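The old name remains as a thin shim that delegates to the new method, so existing call sites keep compiling while editors flag the deprecation. A quick sketch of the equivalence (LangChainProvider imported as in the README sketch above; the `output` mapping is inferred from the elided `completionTokens` line):

```typescript
import { AIMessage } from '@langchain/core/messages';

const response = new AIMessage('Hello!');
response.response_metadata = {
  tokenUsage: { totalTokens: 15, promptTokens: 10, completionTokens: 5 },
};

// Preferred going forward:
const metrics = LangChainProvider.getAIMetricsFromResponse(response);
// Deprecated alias; it just delegates and returns the same LDAIMetrics shape:
const legacy = LangChainProvider.createAIMetrics(response);

// Both log { success: true, usage: { total: 15, input: 10, output: 5 } }
console.log(metrics, legacy);
```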
2 changes: 1 addition & 1 deletion packages/ai-providers/server-ai-openai/README.md
@@ -78,7 +78,7 @@ const allMessages = [...configMessages, userMessage];
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => OpenAIProvider.createAIMetrics(result),
+  OpenAIProvider.getAIMetricsFromResponse,
   () => client.chat.completions.create({
     model: 'gpt-4',
     messages: allMessages,
@@ -25,7 +25,7 @@ describe('OpenAIProvider', () => {
     provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = {
         usage: {
@@ -35,7 +35,7 @@ describe('OpenAIProvider', () => {
         },
       };
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -50,7 +50,7 @@ describe('OpenAIProvider', () => {
    it('creates metrics with success=true and no usage when usage is missing', () => {
      const mockResponse = {};
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -66,7 +66,7 @@ describe('OpenAIProvider', () => {
      },
    };
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
32 changes: 27 additions & 5 deletions packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -63,7 +63,7 @@ export class OpenAIProvider extends AIProvider {
     });
 
     // Generate metrics early (assumes success by default)
-    const metrics = OpenAIProvider.createAIMetrics(response);
+    const metrics = OpenAIProvider.getAIMetricsFromResponse(response);
 
     // Safely extract the first choice content using optional chaining
     const content = response?.choices?.[0]?.message?.content || '';
@@ -97,15 +97,24 @@ export class OpenAIProvider extends AIProvider {
   // =============================================================================
 
   /**
-   * Create AI metrics information from an OpenAI response.
+   * Get AI metrics from an OpenAI response.
    * This method extracts token usage information and success status from OpenAI responses
    * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @param response The response from OpenAI chat completions API
+   * @returns LDAIMetrics with success status and token usage
+   *
+   * @example
+   * const response = await aiConfig.tracker.trackMetricsOf(
+   *   OpenAIProvider.getAIMetricsFromResponse,
+   *   () => client.chat.completions.create(config)
+   * );
    */
-  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+  static getAIMetricsFromResponse(response: any): LDAIMetrics {
     // Extract token usage if available
     let usage: LDTokenUsage | undefined;
-    if (openaiResponse?.usage) {
-      const { prompt_tokens, completion_tokens, total_tokens } = openaiResponse.usage;
+    if (response?.usage) {
+      const { prompt_tokens, completion_tokens, total_tokens } = response.usage;
      usage = {
        total: total_tokens || 0,
        input: prompt_tokens || 0,
@@ -119,4 +128,17 @@ export class OpenAIProvider extends AIProvider {
       usage,
     };
   }
+
+  /**
+   * Create AI metrics information from an OpenAI response.
+   * This method extracts token usage information and success status from OpenAI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param openaiResponse The response from OpenAI chat completions API
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+    return OpenAIProvider.getAIMetricsFromResponse(openaiResponse);
+  }
 }
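Since the extractor only reads the snake_case `usage` block, it can be exercised without a network call, exactly as the unit tests above do (the `output` mapping to `completion_tokens` is inferred from the elided line; OpenAIProvider imported as in the package README):

```typescript
// Only the `usage` object is read; the parameter is typed `any`.
const completion = {
  usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
};

const metrics = OpenAIProvider.getAIMetricsFromResponse(completion);
// success is always true here: failures are recorded by the tracker when
// the wrapped call throws, not by this extractor.
console.log(metrics); // { success: true, usage: { total: 15, input: 10, output: 5 } }
```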
2 changes: 1 addition & 1 deletion packages/ai-providers/server-ai-vercel/README.md
@@ -94,7 +94,7 @@ const allMessages = [...configMessages, userMessage];
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => VercelProvider.createAIMetrics(result),
+  VercelProvider.getAIMetricsFromResponse,
   () => generateText({ model, messages: allMessages })
 );
 
@@ -16,7 +16,7 @@ describe('VercelProvider', () => {
     provider = new VercelProvider(mockModel, {});
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = {
         usage: {
@@ -26,7 +26,7 @@ describe('VercelProvider', () => {
         },
       };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -41,7 +41,7 @@ describe('VercelProvider', () => {
    it('creates metrics with success=true and no usage when usage is missing', () => {
      const mockResponse = {};
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -57,7 +57,7 @@ describe('VercelProvider', () => {
      },
    };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -78,7 +78,7 @@ describe('VercelProvider', () => {
      },
    };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
@@ -103,7 +103,7 @@ describe('VercelProvider', () => {
      },
    };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
      expect(result).toEqual({
        success: true,
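The Vercel provider follows the same callback-by-reference pattern. A hedged end-to-end sketch with the Vercel AI SDK (model setup and import specifiers are assumptions; only `getAIMetricsFromResponse` and the `generateText` call shape come from this diff):

```typescript
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
// Package name assumed from this repo's layout; verify against package.json.
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

// Assumed setup: an AI Config obtained from the LaunchDarkly AI SDK.
declare const aiConfig: { tracker: any };

export async function generateTracked(prompt: string): Promise<string> {
  const model = openai('gpt-4o-mini');
  // The generateText result carries `usage`, which the extractor reads.
  const response = await aiConfig.tracker.trackMetricsOf(
    VercelProvider.getAIMetricsFromResponse,
    () => generateText({ model, messages: [{ role: 'user', content: prompt }] }),
  );
  return response.text;
}
```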