
Commit 39664fa

feat!: Support invoke with structured output in OpenAI provider
1 parent 8d57904 commit 39664fa
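For orientation, a hedged caller-side sketch (TypeScript) of the method this commit adds. The `provider` variable stands in for an already-configured OpenAIProvider instance; its construction is not part of this diff, and the person schema is borrowed from the tests below.

// Sketch only: `provider` is assumed to be a configured OpenAIProvider instance.
const result = await provider.invokeStructuredModel(
  [{ role: 'user', content: 'Tell me about a person' }],
  {
    type: 'object',
    properties: {
      name: { type: 'string' },
      age: { type: 'number' },
      city: { type: 'string' },
    },
    required: ['name', 'age', 'city'],
  },
);
// result.data is the parsed object, result.rawResponse is the raw JSON string,
// and result.metrics carries success and token usage.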

2 files changed (+201 -0 lines)

packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts

Lines changed: 151 additions & 0 deletions
@@ -199,6 +199,157 @@ describe('OpenAIProvider', () => {
     });
   });
 
+  describe('invokeStructuredModel', () => {
+    it('invokes OpenAI with structured output and returns parsed response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: '{"name": "John", "age": 30, "city": "New York"}',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 20,
+          completion_tokens: 10,
+          total_tokens: 30,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = {
+        type: 'object',
+        properties: {
+          name: { type: 'string' },
+          age: { type: 'number' },
+          city: { type: 'string' },
+        },
+        required: ['name', 'age', 'city'],
+      };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
+        model: 'gpt-3.5-turbo',
+        messages: [{ role: 'user', content: 'Tell me about a person' }],
+        response_format: {
+          type: 'json_schema',
+          json_schema: {
+            name: 'structured_output',
+            schema: responseStructure,
+            strict: true,
+          },
+        },
+      });
+
+      expect(result).toEqual({
+        data: {
+          name: 'John',
+          age: 30,
+          city: 'New York',
+        },
+        rawResponse: '{"name": "John", "age": 30, "city": "New York"}',
+        metrics: {
+          success: true,
+          usage: {
+            total: 30,
+            input: 20,
+            output: 10,
+          },
+        },
+      });
+    });
+
+    it('returns unsuccessful response when no content in structured response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              // content is missing
+            },
+          },
+        ],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('handles JSON parsing errors gracefully', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: 'invalid json content',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 5,
+          total_tokens: 15,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: 'invalid json content',
+        metrics: {
+          success: false,
+          usage: {
+            total: 15,
+            input: 10,
+            output: 5,
+          },
+        },
+      });
+    });
+
+    it('handles empty choices array in structured response', async () => {
+      const mockResponse = {
+        choices: [],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+  });
+
   describe('getClient', () => {
     it('returns the underlying OpenAI client', () => {
       const client = provider.getClient();

packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts

Lines changed: 50 additions & 0 deletions
@@ -8,6 +8,7 @@ import type {
   LDLogger,
   LDMessage,
   LDTokenUsage,
+  StructuredResponse,
 } from '@launchdarkly/server-sdk-ai';
 
 /**
@@ -85,6 +86,55 @@ export class OpenAIProvider extends AIProvider {
     };
   }
 
+  /**
+   * Invoke the OpenAI model with structured output support.
+   */
+  async invokeStructuredModel(
+    messages: LDMessage[],
+    responseStructure: Record<string, unknown>,
+  ): Promise<StructuredResponse> {
+    // Call OpenAI chat completions API with structured output
+    const response = await this._client.chat.completions.create({
+      model: this._modelName,
+      messages,
+      response_format: {
+        type: 'json_schema',
+        json_schema: {
+          name: 'structured_output',
+          schema: responseStructure,
+          strict: true,
+        },
+      },
+      ...this._parameters,
+    });
+
+    // Generate metrics early (assumes success by default)
+    const metrics = OpenAIProvider.createAIMetrics(response);
+
+    // Safely extract the first choice content using optional chaining
+    const content = response?.choices?.[0]?.message?.content || '';
+
+    if (!content) {
+      this.logger?.warn('OpenAI structured response has no content available');
+      metrics.success = false;
+    }
+
+    // Parse the structured JSON response
+    let data: Record<string, unknown> = {};
+    try {
+      data = JSON.parse(content);
+    } catch (error) {
+      this.logger?.warn('Failed to parse structured response as JSON:', error);
+      metrics.success = false;
+    }
+
+    return {
+      data,
+      rawResponse: content,
+      metrics,
+    };
+  }
+
   /**
    * Get the underlying OpenAI client instance.
    */
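Note on the error path in the method above: a missing completion or unparseable JSON does not throw; the method logs a warning, returns empty `data`, and sets `metrics.success` to false. A hedged consumer sketch of that contract follows; `provider`, `messages`, `schema`, and the handler functions are illustrative stand-ins, not part of this diff.

// Sketch only: check metrics.success before trusting the parsed data.
const { data, rawResponse, metrics } = await provider.invokeStructuredModel(messages, schema);
if (metrics.success) {
  handleStructuredOutput(data); // data parsed successfully from the model's JSON output
} else {
  handleFallback(rawResponse); // no content or invalid JSON; rawResponse keeps what the model returned
}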
