Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,157 @@ describe('OpenAIProvider', () => {
});
});

describe('invokeStructuredModel', () => {
  it('invokes OpenAI with structured output and returns parsed response', async () => {
    const createMock = mockOpenAI.chat.completions.create as jest.Mock;
    createMock.mockResolvedValue({
      choices: [{ message: { content: '{"name": "John", "age": 30, "city": "New York"}' } }],
      usage: { prompt_tokens: 20, completion_tokens: 10, total_tokens: 30 },
    } as any);

    // Schema describing the person object we ask the model to produce.
    const responseStructure = {
      type: 'object',
      properties: {
        name: { type: 'string' },
        age: { type: 'number' },
        city: { type: 'string' },
      },
      required: ['name', 'age', 'city'],
    };
    const prompt = [{ role: 'user' as const, content: 'Tell me about a person' }];

    const result = await provider.invokeStructuredModel(prompt, responseStructure);

    // The provider must wrap the schema in OpenAI's json_schema response_format.
    expect(createMock).toHaveBeenCalledWith({
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Tell me about a person' }],
      response_format: {
        type: 'json_schema',
        json_schema: {
          name: 'structured_output',
          schema: responseStructure,
          strict: true,
        },
      },
    });

    expect(result).toEqual({
      data: { name: 'John', age: 30, city: 'New York' },
      rawResponse: '{"name": "John", "age": 30, "city": "New York"}',
      metrics: {
        success: true,
        usage: { total: 30, input: 20, output: 10 },
      },
    });
  });

  it('returns unsuccessful response when no content in structured response', async () => {
    const createMock = mockOpenAI.chat.completions.create as jest.Mock;
    // The message deliberately omits `content` to exercise the missing-content path.
    createMock.mockResolvedValue({ choices: [{ message: {} }] } as any);

    const result = await provider.invokeStructuredModel(
      [{ role: 'user' as const, content: 'Tell me about a person' }],
      { type: 'object' },
    );

    expect(result).toEqual({
      data: {},
      rawResponse: '',
      metrics: { success: false, usage: undefined },
    });
  });

  it('handles JSON parsing errors gracefully', async () => {
    const createMock = mockOpenAI.chat.completions.create as jest.Mock;
    createMock.mockResolvedValue({
      choices: [{ message: { content: 'invalid json content' } }],
      usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
    } as any);

    const result = await provider.invokeStructuredModel(
      [{ role: 'user' as const, content: 'Tell me about a person' }],
      { type: 'object' },
    );

    // Unparseable content is reported as a failure, but the raw text and
    // token usage are still surfaced to the caller.
    expect(result).toEqual({
      data: {},
      rawResponse: 'invalid json content',
      metrics: {
        success: false,
        usage: { total: 15, input: 10, output: 5 },
      },
    });
  });

  it('handles empty choices array in structured response', async () => {
    const createMock = mockOpenAI.chat.completions.create as jest.Mock;
    createMock.mockResolvedValue({ choices: [] } as any);

    const result = await provider.invokeStructuredModel(
      [{ role: 'user' as const, content: 'Tell me about a person' }],
      { type: 'object' },
    );

    expect(result).toEqual({
      data: {},
      rawResponse: '',
      metrics: { success: false, usage: undefined },
    });
  });
});

describe('getClient', () => {
it('returns the underlying OpenAI client', () => {
const client = provider.getClient();
Expand Down
125 changes: 100 additions & 25 deletions packages/ai-providers/server-ai-openai/src/OpenAIProvider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import type {
LDLogger,
LDMessage,
LDTokenUsage,
StructuredResponse,
} from '@launchdarkly/server-sdk-ai';

/**
Expand Down Expand Up @@ -55,34 +56,108 @@ export class OpenAIProvider extends AIProvider {
* Invoke the OpenAI model with an array of messages.
*/
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
  // NOTE(review): the original span contained interleaved pre-change diff
  // residue (a duplicate, non-wrapped body); this is the single clean
  // try/catch implementation.
  try {
    // Call OpenAI chat completions API.
    const response = await this._client.chat.completions.create({
      model: this._modelName,
      messages,
      ...this._parameters,
    });

    // Generate metrics early (assumes success by default).
    const metrics = OpenAIProvider.createAIMetrics(response);

    // Safely extract the first choice content using optional chaining.
    const content = response?.choices?.[0]?.message?.content || '';

    if (!content) {
      this.logger?.warn('OpenAI response has no content available');
      metrics.success = false;
    }

    // Create the assistant message.
    const assistantMessage: LDMessage = {
      role: 'assistant',
      content,
    };

    return {
      message: assistantMessage,
      metrics,
    };
  } catch (error) {
    // Any API/network failure is reported as an unsuccessful empty response
    // rather than propagating the exception to the caller.
    this.logger?.warn('OpenAI model invocation failed:', error);

    return {
      message: {
        role: 'assistant',
        content: '',
      },
      metrics: {
        success: false,
      },
    };
  }
}
/**
 * Invoke the OpenAI model with structured output support.
 *
 * Sends the messages with a `json_schema` response_format built from
 * `responseStructure` and parses the returned JSON content.
 *
 * @param messages Conversation messages to send to the model.
 * @param responseStructure JSON schema the model output must conform to.
 * @returns Parsed data, the raw response text, and invocation metrics.
 *   On failure, `data` is `{}` and `metrics.success` is false.
 */
async invokeStructuredModel(
  messages: LDMessage[],
  responseStructure: Record<string, unknown>,
): Promise<StructuredResponse> {
  try {
    // Call OpenAI chat completions API with structured output
    const response = await this._client.chat.completions.create({
      model: this._modelName,
      messages,
      response_format: {
        type: 'json_schema',
        json_schema: {
          name: 'structured_output',
          schema: responseStructure,
          strict: true,
        },
      },
      ...this._parameters,
    });

    // Generate metrics early (assumes success by default)
    const metrics = OpenAIProvider.createAIMetrics(response);

    // Safely extract the first choice content using optional chaining
    const content = response?.choices?.[0]?.message?.content || '';

    if (!content) {
      this.logger?.warn('OpenAI structured response has no content available');
      metrics.success = false;
      return {
        data: {},
        rawResponse: '',
        metrics,
      };
    }

    // Parse the structured JSON response. Parse failures are handled here —
    // not in the outer catch — so the raw content and usage metrics are still
    // surfaced to the caller (the outer catch would discard both).
    let data: Record<string, unknown>;
    try {
      data = JSON.parse(content) as Record<string, unknown>;
    } catch (parseError) {
      this.logger?.warn('OpenAI structured response is not valid JSON:', parseError);
      metrics.success = false;
      return {
        data: {},
        rawResponse: content,
        metrics,
      };
    }

    return {
      data,
      rawResponse: content,
      metrics,
    };
  } catch (error) {
    // API/network failure: report an unsuccessful empty response.
    this.logger?.warn('OpenAI structured model invocation failed:', error);

    return {
      data: {},
      rawResponse: '',
      metrics: {
        success: false,
      },
    };
  }
}

/**
Expand Down
Loading