Skip to content

Commit 515dbdf

Browse files
authored
feat!: Support invoke with structured output in OpenAI provider (#980)
1 parent d0cb41d commit 515dbdf

File tree

2 files changed

+249
-16
lines changed

2 files changed

+249
-16
lines changed

packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts

Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,157 @@ describe('OpenAIProvider', () => {
199199
});
200200
});
201201

202+
describe('invokeStructuredModel', () => {
203+
it('invokes OpenAI with structured output and returns parsed response', async () => {
204+
const mockResponse = {
205+
choices: [
206+
{
207+
message: {
208+
content: '{"name": "John", "age": 30, "city": "New York"}',
209+
},
210+
},
211+
],
212+
usage: {
213+
prompt_tokens: 20,
214+
completion_tokens: 10,
215+
total_tokens: 30,
216+
},
217+
};
218+
219+
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
220+
221+
const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
222+
const responseStructure = {
223+
type: 'object',
224+
properties: {
225+
name: { type: 'string' },
226+
age: { type: 'number' },
227+
city: { type: 'string' },
228+
},
229+
required: ['name', 'age', 'city'],
230+
};
231+
232+
const result = await provider.invokeStructuredModel(messages, responseStructure);
233+
234+
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
235+
model: 'gpt-3.5-turbo',
236+
messages: [{ role: 'user', content: 'Tell me about a person' }],
237+
response_format: {
238+
type: 'json_schema',
239+
json_schema: {
240+
name: 'structured_output',
241+
schema: responseStructure,
242+
strict: true,
243+
},
244+
},
245+
});
246+
247+
expect(result).toEqual({
248+
data: {
249+
name: 'John',
250+
age: 30,
251+
city: 'New York',
252+
},
253+
rawResponse: '{"name": "John", "age": 30, "city": "New York"}',
254+
metrics: {
255+
success: true,
256+
usage: {
257+
total: 30,
258+
input: 20,
259+
output: 10,
260+
},
261+
},
262+
});
263+
});
264+
265+
it('returns unsuccessful response when no content in structured response', async () => {
266+
const mockResponse = {
267+
choices: [
268+
{
269+
message: {
270+
// content is missing
271+
},
272+
},
273+
],
274+
};
275+
276+
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
277+
278+
const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
279+
const responseStructure = { type: 'object' };
280+
281+
const result = await provider.invokeStructuredModel(messages, responseStructure);
282+
283+
expect(result).toEqual({
284+
data: {},
285+
rawResponse: '',
286+
metrics: {
287+
success: false,
288+
usage: undefined,
289+
},
290+
});
291+
});
292+
293+
it('handles JSON parsing errors gracefully', async () => {
294+
const mockResponse = {
295+
choices: [
296+
{
297+
message: {
298+
content: 'invalid json content',
299+
},
300+
},
301+
],
302+
usage: {
303+
prompt_tokens: 10,
304+
completion_tokens: 5,
305+
total_tokens: 15,
306+
},
307+
};
308+
309+
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
310+
311+
const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
312+
const responseStructure = { type: 'object' };
313+
314+
const result = await provider.invokeStructuredModel(messages, responseStructure);
315+
316+
expect(result).toEqual({
317+
data: {},
318+
rawResponse: 'invalid json content',
319+
metrics: {
320+
success: false,
321+
usage: {
322+
total: 15,
323+
input: 10,
324+
output: 5,
325+
},
326+
},
327+
});
328+
});
329+
330+
it('handles empty choices array in structured response', async () => {
331+
const mockResponse = {
332+
choices: [],
333+
};
334+
335+
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
336+
337+
const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
338+
const responseStructure = { type: 'object' };
339+
340+
const result = await provider.invokeStructuredModel(messages, responseStructure);
341+
342+
expect(result).toEqual({
343+
data: {},
344+
rawResponse: '',
345+
metrics: {
346+
success: false,
347+
usage: undefined,
348+
},
349+
});
350+
});
351+
});
352+
202353
describe('getClient', () => {
203354
it('returns the underlying OpenAI client', () => {
204355
const client = provider.getClient();

packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts

Lines changed: 98 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import type {
88
LDLogger,
99
LDMessage,
1010
LDTokenUsage,
11+
StructuredResponse,
1112
} from '@launchdarkly/server-sdk-ai';
1213

1314
/**
@@ -55,12 +56,81 @@ export class OpenAIProvider extends AIProvider {
5556
* Invoke the OpenAI model with an array of messages.
5657
*/
5758
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
  try {
    const completion = await this._client.chat.completions.create({
      ...this._parameters,
      model: this._modelName,
      messages,
    });

    // Metrics are derived first and assume success; flipped below if the
    // response carries no usable content.
    const metrics = OpenAIProvider.getAIMetricsFromResponse(completion);

    // Optional chaining guards against missing choices/message fields.
    const content = completion?.choices?.[0]?.message?.content || '';
    if (!content) {
      this.logger?.warn('OpenAI response has no content available');
      metrics.success = false;
    }

    return {
      message: { role: 'assistant', content },
      metrics,
    };
  } catch (error) {
    // Never propagate the API error; report it via an unsuccessful response.
    this.logger?.warn('OpenAI model invocation failed:', error);
    return {
      message: { role: 'assistant', content: '' },
      metrics: { success: false },
    };
  }
}
100+
101+
/**
102+
* Invoke the OpenAI model with structured output support.
103+
*/
104+
async invokeStructuredModel(
105+
messages: LDMessage[],
106+
responseStructure: Record<string, unknown>,
107+
): Promise<StructuredResponse> {
108+
let response;
109+
try {
110+
response = await this._client.chat.completions.create({
111+
...this._parameters,
112+
model: this._modelName,
113+
messages,
114+
response_format: {
115+
type: 'json_schema',
116+
json_schema: {
117+
name: 'structured_output',
118+
schema: responseStructure,
119+
strict: true,
120+
},
121+
},
122+
});
123+
} catch (error) {
124+
this.logger?.warn('OpenAI structured model invocation failed:', error);
125+
126+
return {
127+
data: {},
128+
rawResponse: '',
129+
metrics: {
130+
success: false,
131+
},
132+
};
133+
}
64134

65135
// Generate metrics early (assumes success by default)
66136
const metrics = OpenAIProvider.getAIMetricsFromResponse(response);
@@ -69,20 +139,32 @@ export class OpenAIProvider extends AIProvider {
69139
const content = response?.choices?.[0]?.message?.content || '';
70140

71141
if (!content) {
72-
this.logger?.warn('OpenAI response has no content available');
142+
this.logger?.warn('OpenAI structured response has no content available');
73143
metrics.success = false;
144+
return {
145+
data: {},
146+
rawResponse: '',
147+
metrics,
148+
};
74149
}
75150

76-
// Create the assistant message
77-
const assistantMessage: LDMessage = {
78-
role: 'assistant',
79-
content,
80-
};
151+
try {
152+
const data = JSON.parse(content) as Record<string, unknown>;
81153

82-
return {
83-
message: assistantMessage,
84-
metrics,
85-
};
154+
return {
155+
data,
156+
rawResponse: content,
157+
metrics,
158+
};
159+
} catch (parseError) {
160+
this.logger?.warn('OpenAI structured response contains invalid JSON:', parseError);
161+
metrics.success = false;
162+
return {
163+
data: {},
164+
rawResponse: content,
165+
metrics,
166+
};
167+
}
86168
}
87169

88170
/**

0 commit comments

Comments
 (0)