diff --git a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts index d94326d01..0abfef7f9 100644 --- a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts +++ b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts @@ -199,6 +199,157 @@ describe('OpenAIProvider', () => { }); }); + describe('invokeStructuredModel', () => { + it('invokes OpenAI with structured output and returns parsed response', async () => { + const mockResponse = { + choices: [ + { + message: { + content: '{"name": "John", "age": 30, "city": "New York"}', + }, + }, + ], + usage: { + prompt_tokens: 20, + completion_tokens: 10, + total_tokens: 30, + }, + }; + + (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any); + + const messages = [{ role: 'user' as const, content: 'Tell me about a person' }]; + const responseStructure = { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + city: { type: 'string' }, + }, + required: ['name', 'age', 'city'], + }; + + const result = await provider.invokeStructuredModel(messages, responseStructure); + + expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({ + model: 'gpt-3.5-turbo', + messages: [{ role: 'user', content: 'Tell me about a person' }], + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: responseStructure, + strict: true, + }, + }, + }); + + expect(result).toEqual({ + data: { + name: 'John', + age: 30, + city: 'New York', + }, + rawResponse: '{"name": "John", "age": 30, "city": "New York"}', + metrics: { + success: true, + usage: { + total: 30, + input: 20, + output: 10, + }, + }, + }); + }); + + it('returns unsuccessful response when no content in structured response', async () => { + const mockResponse = { + choices: [ + { + message: { + // content is missing + }, + }, + ], + }; + + 
(mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any); + + const messages = [{ role: 'user' as const, content: 'Tell me about a person' }]; + const responseStructure = { type: 'object' }; + + const result = await provider.invokeStructuredModel(messages, responseStructure); + + expect(result).toEqual({ + data: {}, + rawResponse: '', + metrics: { + success: false, + usage: undefined, + }, + }); + }); + + it('handles JSON parsing errors gracefully', async () => { + const mockResponse = { + choices: [ + { + message: { + content: 'invalid json content', + }, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + }, + }; + + (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any); + + const messages = [{ role: 'user' as const, content: 'Tell me about a person' }]; + const responseStructure = { type: 'object' }; + + const result = await provider.invokeStructuredModel(messages, responseStructure); + + expect(result).toEqual({ + data: {}, + rawResponse: 'invalid json content', + metrics: { + success: false, + usage: { + total: 15, + input: 10, + output: 5, + }, + }, + }); + }); + + it('handles empty choices array in structured response', async () => { + const mockResponse = { + choices: [], + }; + + (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any); + + const messages = [{ role: 'user' as const, content: 'Tell me about a person' }]; + const responseStructure = { type: 'object' }; + + const result = await provider.invokeStructuredModel(messages, responseStructure); + + expect(result).toEqual({ + data: {}, + rawResponse: '', + metrics: { + success: false, + usage: undefined, + }, + }); + }); + }); + describe('getClient', () => { it('returns the underlying OpenAI client', () => { const client = provider.getClient(); diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts 
b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts index 7b30012b8..7376bce09 100644 --- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts +++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts @@ -8,6 +8,7 @@ import type { LDLogger, LDMessage, LDTokenUsage, + StructuredResponse, } from '@launchdarkly/server-sdk-ai'; /** @@ -55,12 +56,81 @@ export class OpenAIProvider extends AIProvider { * Invoke the OpenAI model with an array of messages. */ async invokeModel(messages: LDMessage[]): Promise { - // Call OpenAI chat completions API - const response = await this._client.chat.completions.create({ - model: this._modelName, - messages, - ...this._parameters, - }); + try { + const response = await this._client.chat.completions.create({ + ...this._parameters, + model: this._modelName, + messages, + }); + + // Generate metrics early (assumes success by default) + const metrics = OpenAIProvider.getAIMetricsFromResponse(response); + + // Safely extract the first choice content using optional chaining + const content = response?.choices?.[0]?.message?.content || ''; + + if (!content) { + this.logger?.warn('OpenAI response has no content available'); + metrics.success = false; + } + + const assistantMessage: LDMessage = { + role: 'assistant', + content, + }; + + return { + message: assistantMessage, + metrics, + }; + } catch (error) { + this.logger?.warn('OpenAI model invocation failed:', error); + + return { + message: { + role: 'assistant', + content: '', + }, + metrics: { + success: false, + }, + }; + } + } + + /** + * Invoke the OpenAI model with structured output support. 
+ */ + async invokeStructuredModel( + messages: LDMessage[], + responseStructure: Record<string, unknown>, + ): Promise<StructuredResponse> { + let response; + try { + response = await this._client.chat.completions.create({ + ...this._parameters, + model: this._modelName, + messages, + response_format: { + type: 'json_schema', + json_schema: { + name: 'structured_output', + schema: responseStructure, + strict: true, + }, + }, + }); + } catch (error) { + this.logger?.warn('OpenAI structured model invocation failed:', error); + + return { + data: {}, + rawResponse: '', + metrics: { + success: false, + }, + }; + } // Generate metrics early (assumes success by default) const metrics = OpenAIProvider.getAIMetricsFromResponse(response); @@ -69,20 +139,32 @@ export class OpenAIProvider extends AIProvider { const content = response?.choices?.[0]?.message?.content || ''; if (!content) { - this.logger?.warn('OpenAI response has no content available'); + this.logger?.warn('OpenAI structured response has no content available'); metrics.success = false; + return { + data: {}, + rawResponse: '', + metrics, + }; } - // Create the assistant message - const assistantMessage: LDMessage = { - role: 'assistant', - content, - }; + try { + const data = JSON.parse(content) as Record<string, unknown>; - return { - message: assistantMessage, - metrics, - }; + return { + data, + rawResponse: content, + metrics, + }; + } catch (parseError) { + this.logger?.warn('OpenAI structured response contains invalid JSON:', parseError); + metrics.success = false; + return { + data: {}, + rawResponse: content, + metrics, + }; + } } /**