
Commit bfad20e

feat: Renamed createAIMetrics to getAIMetricsFromResponse
1 parent dc07917

8 files changed (+65 additions, −28 deletions)


packages/ai-providers/server-ai-langchain/README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -94,7 +94,7 @@ const allMessages = [...LangChainProvider.convertMessagesToLangChain(configMessa
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => LangChainProvider.createAIMetrics(result),
+  LangChainProvider.getAIMetricsFromResponse,
   () => llm.invoke(allMessages)
 );
 
```
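The switch from an arrow-function wrapper to a bare method reference works because `getAIMetricsFromResponse` is a static method that, judging from the provider diff further down, never reads `this`. It can also be called directly to see what the tracker records; a minimal sketch based on this commit's test fixtures (the package import path is an assumption):

```typescript
import { AIMessage } from '@langchain/core/messages';
// Import path is an assumption; use the package's actual entry point.
import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

const response = new AIMessage('Test response');
response.response_metadata = {
  tokenUsage: { totalTokens: 30, promptTokens: 10, completionTokens: 20 },
};

// Expected shape per the tests: { success: true, usage: { total: 30, input: 10, output: 20 } }
const metrics = LangChainProvider.getAIMetricsFromResponse(response);
```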

packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts

Lines changed: 3 additions & 3 deletions
```diff
@@ -73,7 +73,7 @@ describe('LangChainProvider', () => {
     });
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = new AIMessage('Test response');
       mockResponse.response_metadata = {
@@ -84,7 +84,7 @@ describe('LangChainProvider', () => {
         },
       };
 
-      const result = LangChainProvider.createAIMetrics(mockResponse);
+      const result = LangChainProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -99,7 +99,7 @@ describe('LangChainProvider', () => {
     it('creates metrics with success=true and no usage when metadata is missing', () => {
       const mockResponse = new AIMessage('Test response');
 
-      const result = LangChainProvider.createAIMetrics(mockResponse);
+      const result = LangChainProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
```

packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts

Lines changed: 22 additions & 7 deletions
```diff
@@ -51,7 +51,7 @@ export class LangChainProvider extends AIProvider {
     const response: AIMessage = await this._llm.invoke(langchainMessages);
 
     // Generate metrics early (assumes success by default)
-    const metrics = LangChainProvider.createAIMetrics(response);
+    const metrics = LangChainProvider.getAIMetricsFromResponse(response);
 
     // Extract text content from the response
     let content: string = '';
@@ -106,25 +106,27 @@ export class LangChainProvider extends AIProvider {
   }
 
   /**
-   * Create AI metrics information from a LangChain provider response.
+   * Get AI metrics from a LangChain provider response.
    * This method extracts token usage information and success status from LangChain responses
    * and returns a LaunchDarkly AIMetrics object.
    *
-   * @param langChainResponse The response from the LangChain model
+   * @param response The response from the LangChain model
+   * @returns LDAIMetrics with success status and token usage
+   *
    * @example
    * ```typescript
    * // Use with tracker.trackMetricsOf for automatic tracking
    * const response = await tracker.trackMetricsOf(
-   *   (result: AIMessage) => LangChainProvider.createAIMetrics(result),
+   *   LangChainProvider.getAIMetricsFromResponse,
    *   () => llm.invoke(messages)
    * );
    * ```
    */
-  static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
+  static getAIMetricsFromResponse(response: AIMessage): LDAIMetrics {
     // Extract token usage if available
     let usage: LDTokenUsage | undefined;
-    if (langChainResponse?.response_metadata?.tokenUsage) {
-      const { tokenUsage } = langChainResponse.response_metadata;
+    if (response?.response_metadata?.tokenUsage) {
+      const { tokenUsage } = response.response_metadata;
       usage = {
         total: tokenUsage.totalTokens || 0,
         input: tokenUsage.promptTokens || 0,
@@ -139,6 +141,19 @@ export class LangChainProvider extends AIProvider {
     };
   }
 
+  /**
+   * Create AI metrics information from a LangChain provider response.
+   * This method extracts token usage information and success status from LangChain responses
+   * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param langChainResponse The response from the LangChain model
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(langChainResponse: AIMessage): LDAIMetrics {
+    return LangChainProvider.getAIMetricsFromResponse(langChainResponse);
+  }
+
   /**
    * Convert LaunchDarkly messages to LangChain messages.
    * This helper method enables developers to work directly with LangChain message types
```
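The old name is kept as a thin deprecated alias, so existing callers compile unchanged while editors surface the `@deprecated` tag. A minimal sketch of the equivalence (import paths assumed as above):

```typescript
import { AIMessage } from '@langchain/core/messages';
import { LangChainProvider } from '@launchdarkly/server-sdk-ai-langchain';

const response = new AIMessage('Test response');

// The alias delegates, so both calls produce the same metrics; with no
// response_metadata, usage stays undefined and success defaults to true.
const legacy = LangChainProvider.createAIMetrics(response); // deprecated
const current = LangChainProvider.getAIMetricsFromResponse(response);
```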

packages/ai-providers/server-ai-openai/README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -78,7 +78,7 @@ const allMessages = [...configMessages, userMessage];
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => OpenAIProvider.createAIMetrics(result),
+  OpenAIProvider.getAIMetricsFromResponse,
   () => client.chat.completions.create({
     model: 'gpt-4',
     messages: allMessages,
```
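As the provider source below shows, the extractor maps OpenAI's snake_case usage fields onto LaunchDarkly's token-usage shape: `total_tokens` → `total`, `prompt_tokens` → `input`, `completion_tokens` → `output`, each defaulting to 0. A direct-call sketch (the import path is an assumption):

```typescript
// Import path is an assumption; use the package's actual entry point.
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

const completion = {
  usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
};

// Expected per the tests: { success: true, usage: { total: 30, input: 10, output: 20 } }
const metrics = OpenAIProvider.getAIMetricsFromResponse(completion);
```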

packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts

Lines changed: 4 additions & 4 deletions
```diff
@@ -25,7 +25,7 @@ describe('OpenAIProvider', () => {
     provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = {
         usage: {
@@ -35,7 +35,7 @@ describe('OpenAIProvider', () => {
         },
       };
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -50,7 +50,7 @@ describe('OpenAIProvider', () => {
     it('creates metrics with success=true and no usage when usage is missing', () => {
       const mockResponse = {};
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -66,7 +66,7 @@ describe('OpenAIProvider', () => {
         },
       };
 
-      const result = OpenAIProvider.createAIMetrics(mockResponse);
+      const result = OpenAIProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
```

packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts

Lines changed: 27 additions & 5 deletions
```diff
@@ -63,7 +63,7 @@ export class OpenAIProvider extends AIProvider {
     });
 
     // Generate metrics early (assumes success by default)
-    const metrics = OpenAIProvider.createAIMetrics(response);
+    const metrics = OpenAIProvider.getAIMetricsFromResponse(response);
 
     // Safely extract the first choice content using optional chaining
     const content = response?.choices?.[0]?.message?.content || '';
@@ -97,15 +97,24 @@ export class OpenAIProvider extends AIProvider {
   // =============================================================================
 
   /**
-   * Create AI metrics information from an OpenAI response.
+   * Get AI metrics from an OpenAI response.
    * This method extracts token usage information and success status from OpenAI responses
    * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @param response The response from OpenAI chat completions API
+   * @returns LDAIMetrics with success status and token usage
+   *
+   * @example
+   * const response = await aiConfig.tracker.trackMetricsOf(
+   *   OpenAIProvider.getAIMetricsFromResponse,
+   *   () => client.chat.completions.create(config)
+   * );
    */
-  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+  static getAIMetricsFromResponse(response: any): LDAIMetrics {
     // Extract token usage if available
     let usage: LDTokenUsage | undefined;
-    if (openaiResponse?.usage) {
-      const { prompt_tokens, completion_tokens, total_tokens } = openaiResponse.usage;
+    if (response?.usage) {
+      const { prompt_tokens, completion_tokens, total_tokens } = response.usage;
       usage = {
         total: total_tokens || 0,
         input: prompt_tokens || 0,
@@ -119,4 +128,17 @@ export class OpenAIProvider extends AIProvider {
       usage,
     };
   }
+
+  /**
+   * Create AI metrics information from an OpenAI response.
+   * This method extracts token usage information and success status from OpenAI responses
+   * and returns a LaunchDarkly AIMetrics object.
+   *
+   * @deprecated Use `getAIMetricsFromResponse()` instead.
+   * @param openaiResponse The response from OpenAI chat completions API
+   * @returns LDAIMetrics with success status and token usage
+   */
+  static createAIMetrics(openaiResponse: any): LDAIMetrics {
+    return OpenAIProvider.getAIMetricsFromResponse(openaiResponse);
+  }
 }
```
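Because the new extractor takes everything it needs from its `response` argument, and the deprecated wrapper delegates through the class name rather than `this`, both appear safe to pass around as detached references, which is what the updated READMEs now do. A short sketch:

```typescript
// Import path is an assumption; use the package's actual entry point.
import { OpenAIProvider } from '@launchdarkly/server-sdk-ai-openai';

// Detached reference: safe because the extractor never reads `this`.
const extract = OpenAIProvider.getAIMetricsFromResponse;

const metrics = extract({
  usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 },
});
// metrics.success is true; metrics.usage is { total: 3, input: 1, output: 2 }.
```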

packages/ai-providers/server-ai-vercel/README.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -94,7 +94,7 @@ const allMessages = [...configMessages, userMessage];
 
 // Track the model call with LaunchDarkly tracking
 const response = await aiConfig.tracker.trackMetricsOf(
-  (result) => VercelProvider.createAIMetrics(result),
+  VercelProvider.getAIMetricsFromResponse,
   () => generateText({ model, messages: allMessages })
 );
 
```
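The extractor also works outside `trackMetricsOf`, for example to log metrics for a call made without LaunchDarkly tracking. A minimal sketch, reusing the `model` and `allMessages` from the README snippet above (the package import path is an assumption):

```typescript
import { generateText } from 'ai';
// Import path is an assumption; use the package's actual entry point.
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

const result = await generateText({ model, messages: allMessages });

// Same extraction trackMetricsOf applies: success defaults to true,
// and usage is filled in from the response when available.
const metrics = VercelProvider.getAIMetricsFromResponse(result);
console.log(metrics.success, metrics.usage?.total);
```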

packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts

Lines changed: 6 additions & 6 deletions
```diff
@@ -16,7 +16,7 @@ describe('VercelProvider', () => {
     provider = new VercelProvider(mockModel, {});
   });
 
-  describe('createAIMetrics', () => {
+  describe('getAIMetricsFromResponse', () => {
     it('creates metrics with success=true and token usage', () => {
       const mockResponse = {
         usage: {
@@ -26,7 +26,7 @@ describe('VercelProvider', () => {
         },
       };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -41,7 +41,7 @@ describe('VercelProvider', () => {
     it('creates metrics with success=true and no usage when usage is missing', () => {
       const mockResponse = {};
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -57,7 +57,7 @@ describe('VercelProvider', () => {
         },
       };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -78,7 +78,7 @@ describe('VercelProvider', () => {
         },
       };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
@@ -103,7 +103,7 @@ describe('VercelProvider', () => {
         },
       };
 
-      const result = VercelProvider.createAIMetrics(mockResponse);
+      const result = VercelProvider.getAIMetricsFromResponse(mockResponse);
 
       expect(result).toEqual({
         success: true,
```
