Skip to content

Commit a7ad0ea

Browse files
authored
feat: Add support for TrackedChats in the AI SDK (#939)
1 parent f0b9b5d commit a7ad0ea

File tree

17 files changed

+795
-6
lines changed

17 files changed

+795
-6
lines changed

packages/sdk/server-ai/__tests__/LDAIConfigTrackerImpl.test.ts

Lines changed: 155 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -898,3 +898,158 @@ it('tracks error', () => {
898898
1,
899899
);
900900
});
901+
902+
describe('trackMetricsOf', () => {
903+
it('tracks success and token usage from metrics', async () => {
904+
const tracker = new LDAIConfigTrackerImpl(
905+
mockLdClient,
906+
configKey,
907+
variationKey,
908+
version,
909+
modelName,
910+
providerName,
911+
testContext,
912+
);
913+
914+
const mockResult = { response: 'test' };
915+
const mockMetrics = {
916+
success: true,
917+
usage: { total: 100, input: 50, output: 50 },
918+
};
919+
920+
const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
921+
const operation = jest.fn().mockResolvedValue(mockResult);
922+
923+
const result = await tracker.trackMetricsOf(metricsExtractor, operation);
924+
925+
expect(result).toBe(mockResult);
926+
expect(metricsExtractor).toHaveBeenCalledWith(mockResult);
927+
expect(operation).toHaveBeenCalled();
928+
929+
// Should track success
930+
expect(mockTrack).toHaveBeenCalledWith(
931+
'$ld:ai:generation:success',
932+
testContext,
933+
{ configKey, variationKey, version, modelName, providerName },
934+
1,
935+
);
936+
937+
// Should track token usage
938+
expect(mockTrack).toHaveBeenCalledWith(
939+
'$ld:ai:tokens:total',
940+
testContext,
941+
{ configKey, variationKey, version, modelName, providerName },
942+
100,
943+
);
944+
expect(mockTrack).toHaveBeenCalledWith(
945+
'$ld:ai:tokens:input',
946+
testContext,
947+
{ configKey, variationKey, version, modelName, providerName },
948+
50,
949+
);
950+
expect(mockTrack).toHaveBeenCalledWith(
951+
'$ld:ai:tokens:output',
952+
testContext,
953+
{ configKey, variationKey, version, modelName, providerName },
954+
50,
955+
);
956+
});
957+
958+
it('tracks failure when metrics indicate failure', async () => {
959+
const tracker = new LDAIConfigTrackerImpl(
960+
mockLdClient,
961+
configKey,
962+
variationKey,
963+
version,
964+
modelName,
965+
providerName,
966+
testContext,
967+
);
968+
969+
const mockResult = { response: 'test' };
970+
const mockMetrics = {
971+
success: false,
972+
};
973+
974+
const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
975+
const operation = jest.fn().mockResolvedValue(mockResult);
976+
977+
await tracker.trackMetricsOf(metricsExtractor, operation);
978+
979+
// Should track error
980+
expect(mockTrack).toHaveBeenCalledWith(
981+
'$ld:ai:generation:error',
982+
testContext,
983+
{ configKey, variationKey, version, modelName, providerName },
984+
1,
985+
);
986+
});
987+
988+
it('tracks failure when operation throws', async () => {
989+
const tracker = new LDAIConfigTrackerImpl(
990+
mockLdClient,
991+
configKey,
992+
variationKey,
993+
version,
994+
modelName,
995+
providerName,
996+
testContext,
997+
);
998+
999+
const error = new Error('Operation failed');
1000+
const metricsExtractor = jest.fn();
1001+
const operation = jest.fn().mockRejectedValue(error);
1002+
1003+
await expect(tracker.trackMetricsOf(metricsExtractor, operation)).rejects.toThrow(error);
1004+
1005+
// Should track error
1006+
expect(mockTrack).toHaveBeenCalledWith(
1007+
'$ld:ai:generation:error',
1008+
testContext,
1009+
{ configKey, variationKey, version, modelName, providerName },
1010+
1,
1011+
);
1012+
1013+
// Should not call metrics extractor when operation fails
1014+
expect(metricsExtractor).not.toHaveBeenCalled();
1015+
});
1016+
1017+
it('tracks metrics without token usage', async () => {
1018+
const tracker = new LDAIConfigTrackerImpl(
1019+
mockLdClient,
1020+
configKey,
1021+
variationKey,
1022+
version,
1023+
modelName,
1024+
providerName,
1025+
testContext,
1026+
);
1027+
1028+
const mockResult = { response: 'test' };
1029+
const mockMetrics = {
1030+
success: true,
1031+
// No usage provided
1032+
};
1033+
1034+
const metricsExtractor = jest.fn().mockReturnValue(mockMetrics);
1035+
const operation = jest.fn().mockResolvedValue(mockResult);
1036+
1037+
await tracker.trackMetricsOf(metricsExtractor, operation);
1038+
1039+
// Should track success but not token usage
1040+
expect(mockTrack).toHaveBeenCalledWith(
1041+
'$ld:ai:generation:success',
1042+
testContext,
1043+
{ configKey, variationKey, version, modelName, providerName },
1044+
1,
1045+
);
1046+
1047+
// Should not track token usage
1048+
expect(mockTrack).not.toHaveBeenCalledWith(
1049+
'$ld:ai:tokens:total',
1050+
expect.any(Object),
1051+
expect.any(Object),
1052+
expect.any(Number),
1053+
);
1054+
});
1055+
});
Lines changed: 231 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,231 @@
1+
import { TrackedChat } from '../src/api/chat/TrackedChat';
2+
import { ChatResponse } from '../src/api/chat/types';
3+
import { LDAIConfig, LDMessage } from '../src/api/config/LDAIConfig';
4+
import { LDAIConfigTracker } from '../src/api/config/LDAIConfigTracker';
5+
import { AIProvider } from '../src/api/providers/AIProvider';
6+
7+
describe('TrackedChat', () => {
8+
let mockProvider: jest.Mocked<AIProvider>;
9+
let mockTracker: jest.Mocked<LDAIConfigTracker>;
10+
let aiConfig: LDAIConfig;
11+
12+
beforeEach(() => {
13+
// Mock the AIProvider
14+
mockProvider = {
15+
invokeModel: jest.fn(),
16+
} as any;
17+
18+
// Mock the LDAIConfigTracker
19+
mockTracker = {
20+
trackMetricsOf: jest.fn(),
21+
trackDuration: jest.fn(),
22+
trackTokens: jest.fn(),
23+
trackSuccess: jest.fn(),
24+
trackError: jest.fn(),
25+
trackFeedback: jest.fn(),
26+
trackTimeToFirstToken: jest.fn(),
27+
trackDurationOf: jest.fn(),
28+
trackOpenAIMetrics: jest.fn(),
29+
trackBedrockConverseMetrics: jest.fn(),
30+
trackVercelAIMetrics: jest.fn(),
31+
getSummary: jest.fn(),
32+
} as any;
33+
34+
// Create a basic AI config
35+
aiConfig = {
36+
enabled: true,
37+
messages: [{ role: 'system', content: 'You are a helpful assistant.' }],
38+
model: { name: 'gpt-4' },
39+
provider: { name: 'openai' },
40+
tracker: mockTracker,
41+
toVercelAISDK: jest.fn(),
42+
};
43+
});
44+
45+
describe('appendMessages', () => {
46+
it('appends messages to the conversation history', () => {
47+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
48+
49+
const messagesToAppend: LDMessage[] = [
50+
{ role: 'user', content: 'Hello' },
51+
{ role: 'assistant', content: 'Hi there!' },
52+
];
53+
54+
chat.appendMessages(messagesToAppend);
55+
56+
const messages = chat.getMessages(false);
57+
expect(messages).toHaveLength(2);
58+
expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
59+
expect(messages[1]).toEqual({ role: 'assistant', content: 'Hi there!' });
60+
});
61+
62+
it('appends multiple message batches sequentially', () => {
63+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
64+
65+
chat.appendMessages([{ role: 'user', content: 'First message' }]);
66+
chat.appendMessages([{ role: 'assistant', content: 'Second message' }]);
67+
chat.appendMessages([{ role: 'user', content: 'Third message' }]);
68+
69+
const messages = chat.getMessages(false);
70+
expect(messages).toHaveLength(3);
71+
expect(messages[0].content).toBe('First message');
72+
expect(messages[1].content).toBe('Second message');
73+
expect(messages[2].content).toBe('Third message');
74+
});
75+
76+
it('handles empty message array', () => {
77+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
78+
79+
chat.appendMessages([]);
80+
81+
const messages = chat.getMessages(false);
82+
expect(messages).toHaveLength(0);
83+
});
84+
});
85+
86+
describe('getMessages', () => {
87+
it('returns only conversation history when includeConfigMessages is false', () => {
88+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
89+
90+
chat.appendMessages([
91+
{ role: 'user', content: 'User message' },
92+
{ role: 'assistant', content: 'Assistant message' },
93+
]);
94+
95+
const messages = chat.getMessages(false);
96+
97+
expect(messages).toHaveLength(2);
98+
expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
99+
expect(messages[1]).toEqual({ role: 'assistant', content: 'Assistant message' });
100+
});
101+
102+
it('returns only conversation history when includeConfigMessages is omitted (defaults to false)', () => {
103+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
104+
105+
chat.appendMessages([{ role: 'user', content: 'User message' }]);
106+
107+
const messages = chat.getMessages();
108+
109+
expect(messages).toHaveLength(1);
110+
expect(messages[0]).toEqual({ role: 'user', content: 'User message' });
111+
});
112+
113+
it('returns config messages prepended when includeConfigMessages is true', () => {
114+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
115+
116+
chat.appendMessages([
117+
{ role: 'user', content: 'User message' },
118+
{ role: 'assistant', content: 'Assistant message' },
119+
]);
120+
121+
const messages = chat.getMessages(true);
122+
123+
expect(messages).toHaveLength(3);
124+
expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
125+
expect(messages[1]).toEqual({ role: 'user', content: 'User message' });
126+
expect(messages[2]).toEqual({ role: 'assistant', content: 'Assistant message' });
127+
});
128+
129+
it('returns only config messages when no conversation history exists and includeConfigMessages is true', () => {
130+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
131+
132+
const messages = chat.getMessages(true);
133+
134+
expect(messages).toHaveLength(1);
135+
expect(messages[0]).toEqual({ role: 'system', content: 'You are a helpful assistant.' });
136+
});
137+
138+
it('returns empty array when no messages exist and includeConfigMessages is false', () => {
139+
const configWithoutMessages: LDAIConfig = {
140+
...aiConfig,
141+
messages: [],
142+
};
143+
const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
144+
145+
const messages = chat.getMessages(false);
146+
147+
expect(messages).toHaveLength(0);
148+
});
149+
150+
it('returns a copy of the messages array (not a reference)', () => {
151+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
152+
153+
chat.appendMessages([{ role: 'user', content: 'Original message' }]);
154+
155+
const messages1 = chat.getMessages();
156+
const messages2 = chat.getMessages();
157+
158+
expect(messages1).not.toBe(messages2);
159+
expect(messages1).toEqual(messages2);
160+
161+
// Modifying returned array should not affect internal state
162+
messages1.push({ role: 'assistant', content: 'Modified' });
163+
164+
const messages3 = chat.getMessages();
165+
expect(messages3).toHaveLength(1);
166+
expect(messages3[0].content).toBe('Original message');
167+
});
168+
169+
it('handles undefined config messages gracefully', () => {
170+
const configWithoutMessages: LDAIConfig = {
171+
...aiConfig,
172+
messages: undefined,
173+
};
174+
const chat = new TrackedChat(configWithoutMessages, mockTracker, mockProvider);
175+
176+
chat.appendMessages([{ role: 'user', content: 'User message' }]);
177+
178+
const messagesWithConfig = chat.getMessages(true);
179+
expect(messagesWithConfig).toHaveLength(1);
180+
expect(messagesWithConfig[0].content).toBe('User message');
181+
182+
const messagesWithoutConfig = chat.getMessages(false);
183+
expect(messagesWithoutConfig).toHaveLength(1);
184+
expect(messagesWithoutConfig[0].content).toBe('User message');
185+
});
186+
});
187+
188+
describe('integration with invoke', () => {
189+
it('adds messages from invoke to history accessible via getMessages', async () => {
190+
const mockResponse: ChatResponse = {
191+
message: { role: 'assistant', content: 'Response from model' },
192+
metrics: { success: true },
193+
};
194+
195+
mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
196+
197+
mockProvider.invokeModel.mockResolvedValue(mockResponse);
198+
199+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
200+
201+
await chat.invoke('Hello');
202+
203+
const messages = chat.getMessages(false);
204+
expect(messages).toHaveLength(2);
205+
expect(messages[0]).toEqual({ role: 'user', content: 'Hello' });
206+
expect(messages[1]).toEqual({ role: 'assistant', content: 'Response from model' });
207+
});
208+
209+
it('preserves appended messages when invoking', async () => {
210+
const mockResponse: ChatResponse = {
211+
message: { role: 'assistant', content: 'Response' },
212+
metrics: { success: true },
213+
};
214+
215+
mockTracker.trackMetricsOf.mockImplementation(async (extractor, func) => func());
216+
217+
mockProvider.invokeModel.mockResolvedValue(mockResponse);
218+
219+
const chat = new TrackedChat(aiConfig, mockTracker, mockProvider);
220+
221+
chat.appendMessages([{ role: 'user', content: 'Pre-appended message' }]);
222+
await chat.invoke('New user input');
223+
224+
const messages = chat.getMessages(false);
225+
expect(messages).toHaveLength(3);
226+
expect(messages[0].content).toBe('Pre-appended message');
227+
expect(messages[1].content).toBe('New user input');
228+
expect(messages[2].content).toBe('Response');
229+
});
230+
});
231+
});

0 commit comments

Comments (0)