Skip to content

Commit 20e5c15

Browse files
committed
Don't throw an exception when the response is empty.
1 parent a300dea commit 20e5c15

File tree

2 files changed

+81
-40
lines changed

2 files changed

+81
-40
lines changed

packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts

Lines changed: 63 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -3,20 +3,18 @@ import { OpenAI } from 'openai';
33
import { OpenAIProvider } from '../src/OpenAIProvider';
44

55
// Mock OpenAI
6-
jest.mock('openai', () => {
7-
return {
8-
OpenAI: jest.fn().mockImplementation(() => ({
9-
chat: {
10-
completions: {
11-
create: jest.fn().mockResolvedValue({
12-
choices: [{ message: { content: 'Test response' } }],
13-
usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
14-
}),
15-
},
6+
jest.mock('openai', () => ({
7+
OpenAI: jest.fn().mockImplementation(() => ({
8+
chat: {
9+
completions: {
10+
create: jest.fn().mockResolvedValue({
11+
choices: [{ message: { content: 'Test response' } }],
12+
usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
13+
}),
1614
},
17-
})),
18-
};
19-
});
15+
},
16+
})),
17+
}));
2018

2119
describe('OpenAIProvider', () => {
2220
let mockOpenAI: jest.Mocked<OpenAI>;
@@ -27,7 +25,6 @@ describe('OpenAIProvider', () => {
2725
provider = new OpenAIProvider(mockOpenAI, 'gpt-3.5-turbo', {});
2826
});
2927

30-
3128
describe('createAIMetrics', () => {
3229
it('creates metrics with success=true and token usage', () => {
3330
const mockResponse = {
@@ -101,9 +98,7 @@ describe('OpenAIProvider', () => {
10198

10299
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
103100

104-
const messages = [
105-
{ role: 'user' as const, content: 'Hello!' },
106-
];
101+
const messages = [{ role: 'user' as const, content: 'Hello!' }];
107102

108103
const result = await provider.invokeModel(messages);
109104

@@ -128,7 +123,7 @@ describe('OpenAIProvider', () => {
128123
});
129124
});
130125

131-
it('throws error when no content in response', async () => {
126+
it('returns unsuccessful response when no content in response', async () => {
132127
const mockResponse = {
133128
choices: [
134129
{
@@ -141,25 +136,66 @@ describe('OpenAIProvider', () => {
141136

142137
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
143138

144-
const messages = [
145-
{ role: 'user' as const, content: 'Hello!' },
146-
];
139+
const messages = [{ role: 'user' as const, content: 'Hello!' }];
140+
141+
const result = await provider.invokeModel(messages);
147142

148-
await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
143+
expect(result).toEqual({
144+
message: {
145+
role: 'assistant',
146+
content: '',
147+
},
148+
metrics: {
149+
success: false,
150+
usage: undefined,
151+
},
152+
});
149153
});
150154

151-
it('handles empty choices array', async () => {
155+
it('returns unsuccessful response when choices array is empty', async () => {
152156
const mockResponse = {
153157
choices: [],
154158
};
155159

156160
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
157161

158-
const messages = [
159-
{ role: 'user' as const, content: 'Hello!' },
160-
];
162+
const messages = [{ role: 'user' as const, content: 'Hello!' }];
163+
164+
const result = await provider.invokeModel(messages);
165+
166+
expect(result).toEqual({
167+
message: {
168+
role: 'assistant',
169+
content: '',
170+
},
171+
metrics: {
172+
success: false,
173+
usage: undefined,
174+
},
175+
});
176+
});
177+
178+
it('returns unsuccessful response when choices is undefined', async () => {
179+
const mockResponse = {
180+
// choices is missing entirely
181+
};
182+
183+
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
184+
185+
const messages = [{ role: 'user' as const, content: 'Hello!' }];
161186

162-
await expect(provider.invokeModel(messages)).rejects.toThrow('No content in OpenAI response');
187+
const result = await provider.invokeModel(messages);
188+
189+
expect(result).toEqual({
190+
message: {
191+
role: 'assistant',
192+
content: '',
193+
},
194+
metrics: {
195+
success: false,
196+
usage: undefined,
197+
},
198+
});
163199
});
164200
});
165201

packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts

Lines changed: 18 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,12 @@ export class OpenAIProvider extends AIProvider {
1919
private _modelName: string;
2020
private _parameters: Record<string, unknown>;
2121

22-
constructor(client: OpenAI, modelName: string, parameters: Record<string, unknown>, logger?: LDLogger) {
22+
constructor(
23+
client: OpenAI,
24+
modelName: string,
25+
parameters: Record<string, unknown>,
26+
logger?: LDLogger,
27+
) {
2328
super(logger);
2429
this._client = client;
2530
this._modelName = modelName;
@@ -53,25 +58,27 @@ export class OpenAIProvider extends AIProvider {
5358
// Call OpenAI chat completions API
5459
const response = await this._client.chat.completions.create({
5560
model: this._modelName,
56-
messages: messages,
61+
messages,
5762
...this._parameters,
5863
});
5964

60-
// Extract the first choice content
61-
const choice = response.choices[0];
62-
if (!choice?.message?.content) {
63-
throw new Error('No content in OpenAI response');
65+
// Generate metrics early (assumes success by default)
66+
const metrics = OpenAIProvider.createAIMetrics(response);
67+
68+
// Safely extract the first choice content using optional chaining
69+
const content = response?.choices?.[0]?.message?.content || '';
70+
71+
if (!content) {
72+
this.logger?.warn('OpenAI response has no content available');
73+
metrics.success = false;
6474
}
6575

6676
// Create the assistant message
6777
const assistantMessage: LDMessage = {
6878
role: 'assistant',
69-
content: choice.message.content,
79+
content,
7080
};
7181

72-
// Extract metrics including token usage and success status
73-
const metrics = OpenAIProvider.createAIMetrics(response);
74-
7582
return {
7683
message: assistantMessage,
7784
metrics,
@@ -85,7 +92,6 @@ export class OpenAIProvider extends AIProvider {
8592
return this._client;
8693
}
8794

88-
8995
// =============================================================================
9096
// STATIC UTILITY METHODS
9197
// =============================================================================
@@ -107,11 +113,10 @@ export class OpenAIProvider extends AIProvider {
107113
};
108114
}
109115

110-
// OpenAI responses that complete successfully are considered successful
116+
// OpenAI responses that complete successfully are considered successful by default
111117
return {
112118
success: true,
113119
usage,
114120
};
115121
}
116-
117122
}

0 commit comments

Comments (0)