From 39664fa7f54d0d27215af3f8992cb03492151c7f Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Thu, 30 Oct 2025 17:46:02 +0000
Subject: [PATCH 1/5] feat!: Support invoke with structured output in OpenAI provider

---
 .../__tests__/OpenAIProvider.test.ts          | 151 ++++++++++++++++++
 .../server-ai-openai/src/OpenAIProvider.ts    |  50 ++++++
 2 files changed, 201 insertions(+)

diff --git a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
index 50bc4b9cde..f9376cd377 100644
--- a/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
+++ b/packages/ai-providers/server-ai-openai/__tests__/OpenAIProvider.test.ts
@@ -199,6 +199,157 @@ describe('OpenAIProvider', () => {
     });
   });
 
+  describe('invokeStructuredModel', () => {
+    it('invokes OpenAI with structured output and returns parsed response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: '{"name": "John", "age": 30, "city": "New York"}',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 20,
+          completion_tokens: 10,
+          total_tokens: 30,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = {
+        type: 'object',
+        properties: {
+          name: { type: 'string' },
+          age: { type: 'number' },
+          city: { type: 'string' },
+        },
+        required: ['name', 'age', 'city'],
+      };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
+        model: 'gpt-3.5-turbo',
+        messages: [{ role: 'user', content: 'Tell me about a person' }],
+        response_format: {
+          type: 'json_schema',
+          json_schema: {
+            name: 'structured_output',
+            schema: responseStructure,
+            strict: true,
+          },
+        },
+      });
+
+      expect(result).toEqual({
+        data: {
+          name: 'John',
+          age: 30,
+          city: 'New York',
+        },
+        rawResponse: '{"name": "John", "age": 30, "city": "New York"}',
+        metrics: {
+          success: true,
+          usage: {
+            total: 30,
+            input: 20,
+            output: 10,
+          },
+        },
+      });
+    });
+
+    it('returns unsuccessful response when no content in structured response', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              // content is missing
+            },
+          },
+        ],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+
+    it('handles JSON parsing errors gracefully', async () => {
+      const mockResponse = {
+        choices: [
+          {
+            message: {
+              content: 'invalid json content',
+            },
+          },
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 5,
+          total_tokens: 15,
+        },
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: 'invalid json content',
+        metrics: {
+          success: false,
+          usage: {
+            total: 15,
+            input: 10,
+            output: 5,
+          },
+        },
+      });
+    });
+
+    it('handles empty choices array in structured response', async () => {
+      const mockResponse = {
+        choices: [],
+      };
+
+      (mockOpenAI.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse as any);
+
+      const messages = [{ role: 'user' as const, content: 'Tell me about a person' }];
+      const responseStructure = { type: 'object' };
+
+      const result = await provider.invokeStructuredModel(messages, responseStructure);
+
+      expect(result).toEqual({
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+          usage: undefined,
+        },
+      });
+    });
+  });
+
   describe('getClient', () => {
     it('returns the underlying OpenAI client', () => {
       const client = provider.getClient();
diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 4d1ca699df..7449772aae 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -8,6 +8,7 @@ import type {
   LDLogger,
   LDMessage,
   LDTokenUsage,
+  StructuredResponse,
 } from '@launchdarkly/server-sdk-ai';
 
 /**
@@ -85,6 +86,55 @@ export class OpenAIProvider extends AIProvider {
     };
   }
 
+  /**
+   * Invoke the OpenAI model with structured output support.
+   */
+  async invokeStructuredModel(
+    messages: LDMessage[],
+    responseStructure: Record<string, unknown>,
+  ): Promise<StructuredResponse> {
+    // Call OpenAI chat completions API with structured output
+    const response = await this._client.chat.completions.create({
+      model: this._modelName,
+      messages,
+      response_format: {
+        type: 'json_schema',
+        json_schema: {
+          name: 'structured_output',
+          schema: responseStructure,
+          strict: true,
+        },
+      },
+      ...this._parameters,
+    });
+
+    // Generate metrics early (assumes success by default)
+    const metrics = OpenAIProvider.createAIMetrics(response);
+
+    // Safely extract the first choice content using optional chaining
+    const content = response?.choices?.[0]?.message?.content || '';
+
+    if (!content) {
+      this.logger?.warn('OpenAI structured response has no content available');
+      metrics.success = false;
+    }
+
+    // Parse the structured JSON response
+    let data: Record<string, unknown> = {};
+    try {
+      data = JSON.parse(content);
+    } catch (error) {
+      this.logger?.warn('Failed to parse structured response as JSON:', error);
+      metrics.success = false;
+    }
+
+    return {
+      data,
+      rawResponse: content,
+      metrics,
+    };
+  }
+
   /**
    * Get the underlying OpenAI client instance.
    */

From 9b6b148342610b210d0ed3fa8cf47e8a60cb7a4e Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 5 Nov 2025 14:04:17 +0000
Subject: [PATCH 2/5] don't throw exceptions in openai provider

---
 .../server-ai-openai/src/OpenAIProvider.ts    | 149 ++++++++++--------
 1 file changed, 87 insertions(+), 62 deletions(-)

diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 7449772aae..5d1d62bfea 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -56,34 +56,48 @@ export class OpenAIProvider extends AIProvider {
    * Invoke the OpenAI model with an array of messages.
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
-    // Call OpenAI chat completions API
-    const response = await this._client.chat.completions.create({
-      model: this._modelName,
-      messages,
-      ...this._parameters,
-    });
-
-    // Generate metrics early (assumes success by default)
-    const metrics = OpenAIProvider.createAIMetrics(response);
+    try {
+      // Call OpenAI chat completions API
+      const response = await this._client.chat.completions.create({
+        model: this._modelName,
+        messages,
+        ...this._parameters,
+      });
+
+      // Generate metrics early (assumes success by default)
+      const metrics = OpenAIProvider.createAIMetrics(response);
+
+      // Safely extract the first choice content using optional chaining
+      const content = response?.choices?.[0]?.message?.content || '';
+
+      if (!content) {
+        this.logger?.warn('OpenAI response has no content available');
+        metrics.success = false;
+      }
+
+      // Create the assistant message
+      const assistantMessage: LDMessage = {
+        role: 'assistant',
+        content,
+      };
 
-    // Safely extract the first choice content using optional chaining
-    const content = response?.choices?.[0]?.message?.content || '';
+      return {
+        message: assistantMessage,
+        metrics,
+      };
+    } catch (error) {
+      this.logger?.warn('OpenAI model invocation failed:', error);
 
-    if (!content) {
-      this.logger?.warn('OpenAI response has no content available');
-      metrics.success = false;
+      return {
+        message: {
+          role: 'assistant',
+          content: '',
+        },
+        metrics: {
+          success: false,
+        },
+      };
     }
-
-    // Create the assistant message
-    const assistantMessage: LDMessage = {
-      role: 'assistant',
-      content,
-    };
-
-    return {
-      message: assistantMessage,
-      metrics,
-    };
   }
 
   /**
@@ -93,46 +107,57 @@ export class OpenAIProvider extends AIProvider {
     messages: LDMessage[],
     responseStructure: Record<string, unknown>,
   ): Promise<StructuredResponse> {
-    // Call OpenAI chat completions API with structured output
-    const response = await this._client.chat.completions.create({
-      model: this._modelName,
-      messages,
-      response_format: {
-        type: 'json_schema',
-        json_schema: {
-          name: 'structured_output',
-          schema: responseStructure,
-          strict: true,
-        },
-      },
-      ...this._parameters,
-    });
-
-    // Generate metrics early (assumes success by default)
-    const metrics = OpenAIProvider.createAIMetrics(response);
-
-    // Safely extract the first choice content using optional chaining
-    const content = response?.choices?.[0]?.message?.content || '';
-
-    if (!content) {
-      this.logger?.warn('OpenAI structured response has no content available');
-      metrics.success = false;
-    }
-
-    // Parse the structured JSON response
-    let data: Record<string, unknown> = {};
     try {
-      data = JSON.parse(content);
+      // Call OpenAI chat completions API with structured output
+      const response = await this._client.chat.completions.create({
+        model: this._modelName,
+        messages,
+        response_format: {
+          type: 'json_schema',
+          json_schema: {
+            name: 'structured_output',
+            schema: responseStructure,
+            strict: true,
+          },
+        },
+        ...this._parameters,
+      });
+
+      // Generate metrics early (assumes success by default)
+      const metrics = OpenAIProvider.createAIMetrics(response);
+
+      // Safely extract the first choice content using optional chaining
+      const content = response?.choices?.[0]?.message?.content || '';
+
+      if (!content) {
+        this.logger?.warn('OpenAI structured response has no content available');
+        metrics.success = false;
+        return {
+          data: {},
+          rawResponse: '',
+          metrics,
+        };
+      }
+
+      // Parse the structured JSON response
+      const data = JSON.parse(content) as Record<string, unknown>;
+
+      return {
+        data,
+        rawResponse: content,
+        metrics,
+      };
     } catch (error) {
-      this.logger?.warn('Failed to parse structured response as JSON:', error);
-      metrics.success = false;
-    }
+      this.logger?.warn('OpenAI structured model invocation failed:', error);
 
-    return {
-      data,
-      rawResponse: content,
-      metrics,
-    };
+      return {
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+        },
+      };
+    }
   }
 
From 927566321e410b9cfcc17102d1bff6f86397f7a4 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 5 Nov 2025 19:23:11 +0000
Subject: [PATCH 3/5] fix failing test

---
 .../server-ai-openai/src/OpenAIProvider.ts    | 57 +++++++++++--------
 1 file changed, 32 insertions(+), 25 deletions(-)

diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 017d1157c3..6192e4f76e 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -57,7 +57,6 @@ export class OpenAIProvider extends AIProvider {
    */
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
     try {
-      // Call OpenAI chat completions API
       const response = await this._client.chat.completions.create({
         model: this._modelName,
         messages,
@@ -75,7 +74,6 @@ export class OpenAIProvider extends AIProvider {
         metrics.success = false;
       }
 
-      // Create the assistant message
       const assistantMessage: LDMessage = {
         role: 'assistant',
         content,
@@ -107,9 +105,9 @@ export class OpenAIProvider extends AIProvider {
     messages: LDMessage[],
     responseStructure: Record<string, unknown>,
   ): Promise<StructuredResponse> {
+    let response;
     try {
-      // Call OpenAI chat completions API with structured output
-      const response = await this._client.chat.completions.create({
+      response = await this._client.chat.completions.create({
         model: this._modelName,
         messages,
         response_format: {
@@ -122,24 +120,35 @@ export class OpenAIProvider extends AIProvider {
         },
         ...this._parameters,
       });
+    } catch (error) {
+      this.logger?.warn('OpenAI structured model invocation failed:', error);
 
-      // Generate metrics early (assumes success by default)
-      const metrics = OpenAIProvider.createAIMetrics(response);
+      return {
+        data: {},
+        rawResponse: '',
+        metrics: {
+          success: false,
+        },
+      };
+    }
 
-      // Safely extract the first choice content using optional chaining
-      const content = response?.choices?.[0]?.message?.content || '';
+    // Generate metrics early (assumes success by default)
+    const metrics = OpenAIProvider.getAIMetricsFromResponse(response);
 
-      if (!content) {
-        this.logger?.warn('OpenAI structured response has no content available');
-        metrics.success = false;
-        return {
-          data: {},
-          rawResponse: '',
-          metrics,
-        };
-      }
+    // Safely extract the first choice content using optional chaining
+    const content = response?.choices?.[0]?.message?.content || '';
 
-      // Parse the structured JSON response
+    if (!content) {
+      this.logger?.warn('OpenAI structured response has no content available');
+      metrics.success = false;
+      return {
+        data: {},
+        rawResponse: '',
+        metrics,
+      };
+    }
+
+    try {
       const data = JSON.parse(content) as Record<string, unknown>;
 
       return {
@@ -147,15 +156,13 @@ export class OpenAIProvider extends AIProvider {
         rawResponse: content,
         metrics,
       };
-    } catch (error) {
-      this.logger?.warn('OpenAI structured model invocation failed:', error);
-
+    } catch (parseError) {
+      this.logger?.warn('OpenAI structured response contains invalid JSON:', parseError);
+      metrics.success = false;
       return {
         data: {},
-        rawResponse: '',
-        metrics: {
-          success: false,
-        },
+        rawResponse: content,
+        metrics,
       };
     }
   }

From d40285cc41c9422182fbf7ed4e5dd1744d7039b8 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 5 Nov 2025 19:34:20 +0000
Subject: [PATCH 4/5] expand parameters first so they cannot overwrite other properties

---
 packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 6192e4f76e..0609fa6751 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -108,6 +108,7 @@ export class OpenAIProvider extends AIProvider {
     let response;
     try {
       response = await this._client.chat.completions.create({
+        ...this._parameters,
         model: this._modelName,
         messages,
         response_format: {
@@ -118,7 +119,6 @@ export class OpenAIProvider extends AIProvider {
             strict: true,
           },
         },
-        ...this._parameters,
       });

From 5a3cffe10c43f38f53efc9a1de8df8b4d8768fd8 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 5 Nov 2025 20:48:12 +0000
Subject: [PATCH 5/5] use parameters as defaults not overrides

---
 packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
index 0609fa6751..7376bce09f 100644
--- a/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
+++ b/packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
@@ -58,9 +58,9 @@ export class OpenAIProvider extends AIProvider {
   async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
     try {
       const response = await this._client.chat.completions.create({
+        ...this._parameters,
         model: this._modelName,
         messages,
-        ...this._parameters,
       });
 
       // Generate metrics early (assumes success by default)
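
Note on patches 4 and 5: in a TypeScript object literal, later properties overwrite earlier ones, so moving the `...this._parameters` spread to the front of the create() call turns configured parameters into defaults that the explicit `model`, `messages`, and `response_format` keys always win over. A standalone sketch of that semantics (the values here are illustrative, not taken from the patches):

    // Later keys in an object literal overwrite earlier ones, so the
    // position of a spread decides whether it supplies defaults or overrides.
    const parameters = { model: 'gpt-4o', temperature: 0.7 };

    // After patches 4 and 5: spread first, so the explicit keys win
    // and `parameters` can only fill in values the provider did not set.
    const asDefaults = { ...parameters, model: 'gpt-3.5-turbo' };
    // => { model: 'gpt-3.5-turbo', temperature: 0.7 }

    // Before the change: spread last, so a configured `model` (or a stray
    // `response_format`) silently replaced the provider's own values.
    const asOverrides = { model: 'gpt-3.5-turbo', ...parameters };
    // => { model: 'gpt-4o', temperature: 0.7 }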
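
With the full series applied, invokeStructuredModel never throws: API failures, missing content, and unparseable JSON all surface as a StructuredResponse whose metrics.success is false. A rough caller-side sketch of that contract, assuming an already-constructed provider (construction is not shown in this series, so `provider` here is hypothetical):

    const result = await provider.invokeStructuredModel(
      [{ role: 'user', content: 'Tell me about a person' }],
      {
        type: 'object',
        properties: { name: { type: 'string' }, age: { type: 'number' } },
        required: ['name', 'age'],
      },
    );

    if (result.metrics.success) {
      // data is the parsed JSON object; rawResponse is the raw model text.
      console.log(result.data);
    } else {
      // Covers API errors (rawResponse === ''), missing content, and
      // invalid JSON (rawResponse keeps the unparseable text).
      console.warn('Structured invocation failed:', result.rawResponse);
    }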