
Commit d0cb41d

feat!: Support invoke with structured output in VercelAI provider (#981)
1 parent 6ecd9ab commit d0cb41d

2 files changed: 213 additions & 18 deletions
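For context, a minimal usage sketch of the new structured invoke introduced by this commit. The import paths, model setup, and schema below are illustrative assumptions; only the VercelProvider constructor shape and the invokeStructuredModel signature and return shape come from the diff itself.

// Illustrative only: the package name and model source are assumptions, not part of this commit.
import { openai } from '@ai-sdk/openai';
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel'; // assumed package name

// Any Vercel AI LanguageModel works; the second argument is spread into the generate call as parameters.
const provider = new VercelProvider(openai('gpt-4o-mini'), { temperature: 0 });

// responseStructure is handed to the SDK's jsonSchema() helper, so it should be a JSON Schema object.
const result = await provider.invokeStructuredModel(
  [{ role: 'user', content: 'Generate user data' }],
  {
    type: 'object',
    properties: {
      name: { type: 'string' },
      age: { type: 'number' },
    },
    required: ['name', 'age'],
  },
);

console.log(result.data);        // parsed object returned by generateObject
console.log(result.rawResponse); // JSON.stringify of the same object
console.log(result.metrics);     // { success, usage?: { total, input, output } }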

packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts

Lines changed: 143 additions & 1 deletion
@@ -1,10 +1,12 @@
-import { generateText } from 'ai';
+import { generateObject, generateText, jsonSchema } from 'ai';
 
 import { VercelProvider } from '../src/VercelProvider';
 
 // Mock Vercel AI SDK
 jest.mock('ai', () => ({
   generateText: jest.fn(),
+  generateObject: jest.fn(),
+  jsonSchema: jest.fn((schema) => schema),
 }));
 
 describe('VercelProvider', () => {
@@ -14,6 +16,7 @@ describe('VercelProvider', () => {
   beforeEach(() => {
     mockModel = { name: 'test-model' };
     provider = new VercelProvider(mockModel, {});
+    jest.clearAllMocks();
   });
 
   describe('getAIMetricsFromResponse', () => {
@@ -176,6 +179,145 @@ describe('VercelProvider', () => {
         },
       });
     });
+
+    it('handles errors and returns failure metrics', async () => {
+      const mockError = new Error('API call failed');
+      (generateText as jest.Mock).mockRejectedValue(mockError);
+
+      const mockLogger = {
+        warn: jest.fn(),
+      };
+      provider = new VercelProvider(mockModel, {}, mockLogger as any);
+
+      const messages = [{ role: 'user' as const, content: 'Hello!' }];
+      const result = await provider.invokeModel(messages);
+
+      expect(mockLogger.warn).toHaveBeenCalledWith('Vercel AI model invocation failed:', mockError);
+      expect(result).toEqual({
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+        },
+      });
+    });
+  });
+
+  describe('invokeStructuredModel', () => {
+    it('invokes Vercel AI generateObject and returns structured response', async () => {
+      const mockResponse = {
+        object: {
+          name: 'John Doe',
+          age: 30,
+          isActive: true,
+        },
+        usage: {
+          promptTokens: 10,
+          completionTokens: 15,
+          totalTokens: 25,
+        },
+      };
+
+      (generateObject as jest.Mock).mockResolvedValue(mockResponse);
+
+      const messages = [{ role: 'user' as const, content: 'Generate user data' }];
+      const responseStructure = {
+        name: 'string',
+        age: 0,
+        isActive: true,
+      };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(generateObject).toHaveBeenCalledWith({
+        model: mockModel,
+        messages: [{ role: 'user', content: 'Generate user data' }],
+        schema: responseStructure,
+      });
+      expect(jsonSchema).toHaveBeenCalledWith(responseStructure);
+
+      expect(result).toEqual({
+        data: {
+          name: 'John Doe',
+          age: 30,
+          isActive: true,
+        },
+        rawResponse: JSON.stringify({
+          name: 'John Doe',
+          age: 30,
+          isActive: true,
+        }),
+        metrics: {
+          success: true,
+          usage: {
+            total: 25,
+            input: 10,
+            output: 15,
+          },
+        },
+      });
+    });
+
+    it('handles structured response without usage data', async () => {
+      const mockResponse = {
+        object: {
+          result: 'success',
+        },
+      };
+
+      (generateObject as jest.Mock).mockResolvedValue(mockResponse);
+
+      const messages = [{ role: 'user' as const, content: 'Generate result' }];
+      const responseStructure = {
+        result: 'string',
+      };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {
+          result: 'success',
+        },
+        rawResponse: JSON.stringify({
+          result: 'success',
+        }),
+        metrics: {
+          success: true,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('handles errors and returns failure metrics', async () => {
+      const mockError = new Error('API call failed');
+      (generateObject as jest.Mock).mockRejectedValue(mockError);
+
+      const mockLogger = {
+        warn: jest.fn(),
+      };
+      provider = new VercelProvider(mockModel, {}, mockLogger as any);
+
+      const messages = [{ role: 'user' as const, content: 'Generate result' }];
+      const responseStructure = {
+        result: 'string',
+      };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(mockLogger.warn).toHaveBeenCalledWith(
+        'Vercel AI structured model invocation failed:',
+        mockError,
+      );
+      expect(result).toEqual({
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+        },
+      });
+    });
   });
 
   describe('getModel', () => {
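Worth noting when reading these tests: the flat responseStructure objects above (for example { name: 'string', age: 0, isActive: true }) pass through unchanged only because jsonSchema is mocked as an identity function. Against the real 'ai' package, jsonSchema() expects a JSON Schema definition, so a production responseStructure would look more like the following (an assumption about typical usage, not something asserted in this commit):

const responseStructure = {
  type: 'object',
  properties: {
    name: { type: 'string' },
    age: { type: 'number' },
    isActive: { type: 'boolean' },
  },
  required: ['name', 'age', 'isActive'],
  additionalProperties: false,
};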

packages/ai-providers/server-ai-vercel/src/VercelProvider.ts

Lines changed: 70 additions & 17 deletions
@@ -1,4 +1,4 @@
-import { generateText, LanguageModel } from 'ai';
+import { generateObject, generateText, jsonSchema, LanguageModel } from 'ai';
 
 import { AIProvider } from '@launchdarkly/server-sdk-ai';
 import type {
@@ -8,6 +8,7 @@ import type {
   LDLogger,
   LDMessage,
   LDTokenUsage,
+  StructuredResponse,
 } from '@launchdarkly/server-sdk-ai';
 
 import type {
@@ -68,23 +69,75 @@ export class VercelProvider extends AIProvider {
    * Invoke the Vercel AI model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    const result = await generateText({
-      model: this._model,
-      messages,
-      ...this._parameters,
-    });
-
-    const assistantMessage: LDMessage = {
-      role: 'assistant',
-      content: result.text,
-    };
-
-    const metrics = VercelProvider.getAIMetricsFromResponse(result);
+    try {
+      // Call Vercel AI generateText
+      const result = await generateText({
+        ...this._parameters,
+        model: this._model,
+        messages,
+      });
+
+      // Create the assistant message
+      const assistantMessage: LDMessage = {
+        role: 'assistant',
+        content: result.text,
+      };
+
+      // Extract metrics including token usage and success status
+      const metrics = VercelProvider.getAIMetricsFromResponse(result);
+
+      return {
+        message: assistantMessage,
+        metrics,
+      };
+    } catch (error) {
+      this.logger?.warn('Vercel AI model invocation failed:', error);
+
+      return {
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+        },
+      };
+    }
+  }
 
-    return {
-      message: assistantMessage,
-      metrics,
-    };
+  /**
+   * Invoke the Vercel AI model with structured output support.
+   */
+  async invokeStructuredModel(
+    messages: LDMessage[],
+    responseStructure: Record<string, unknown>,
+  ): Promise<StructuredResponse> {
+    try {
+      const result = await generateObject({
+        ...this._parameters,
+        model: this._model,
+        messages,
+        schema: jsonSchema(responseStructure),
+      });
+
+      const metrics = VercelProvider.createAIMetrics(result);
+
+      return {
+        data: result.object as Record<string, unknown>,
+        rawResponse: JSON.stringify(result.object),
+        metrics,
+      };
+    } catch (error) {
+      this.logger?.warn('Vercel AI structured model invocation failed:', error);
+
+      return {
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+        },
+      };
+    }
   }
 
   /**
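One behavioural consequence of this change, for both invokeModel and invokeStructuredModel: provider errors are no longer thrown to the caller. They are logged via logger?.warn and surfaced as metrics.success === false with empty content or data. A rough caller-side sketch of what that implies (names are illustrative, reusing the variables from the usage sketch near the top of this page):

// provider, messages, and responseStructure as in the usage sketch above.
const response = await provider.invokeStructuredModel(messages, responseStructure);

if (!response.metrics.success) {
  // The provider already logged a warning; response.data is {} and rawResponse is ''.
  // Fall back or retry here instead of relying on a thrown error.
  throw new Error('Structured invocation failed');
}

const user = response.data as { name: string; age: number };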