Commit ea659d4
return success=false if we do not support the result
This is debatable, since the model did produce a result. However, returning success=false lets users roll back an invalid config that generates an unsupported response, so we are taking the more cautious approach.
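
In practice, this lets a caller treat an unsupported (non-string) response like any other failed invocation. A minimal sketch of that pattern, assuming the { message, metrics } result shape shown in the diff below; `provider` is an already-constructed LangChainProvider, and `rollbackInvalidConfig` is a hypothetical application-side helper, not part of this package:

    // Sketch only: `rollbackInvalidConfig` is hypothetical; the result shape
    // ({ message, metrics }) comes from invokeModel in the diff below.
    const result = await provider.invokeModel([{ role: 'user', content: 'Hello' }]);

    if (!result.metrics.success) {
      // The model responded, but with content this provider cannot represent
      // (e.g. a multimodal array), so treat it as a failure and roll back.
      await rollbackInvalidConfig();
    } else {
      console.log(result.message.content);
    }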
1 parent: 6df89f7

2 files changed: 70 additions, 4 deletions
packages/ai-providers/server-ai-langchain/__tests__/LangChainProvider.test.ts

63 additions, 0 deletions

@@ -7,6 +7,14 @@ jest.mock('langchain/chat_models/universal', () => ({
   initChatModel: jest.fn(),
 }));
 
+// Mock logger
+const mockLogger = {
+  warn: jest.fn(),
+  info: jest.fn(),
+  error: jest.fn(),
+  debug: jest.fn(),
+};
+
 describe('LangChainProvider', () => {
   describe('convertMessagesToLangChain', () => {
     it('converts system messages to SystemMessage', () => {
@@ -100,6 +108,61 @@ describe('LangChainProvider', () => {
     });
   });
 
+  describe('invokeModel', () => {
+    let mockLLM: any;
+    let provider: LangChainProvider;
+
+    beforeEach(() => {
+      mockLLM = {
+        invoke: jest.fn(),
+      };
+      provider = new LangChainProvider(mockLLM, mockLogger);
+      jest.clearAllMocks();
+    });
+
+    it('returns success=true for string content', async () => {
+      const mockResponse = new AIMessage('Test response');
+      mockLLM.invoke.mockResolvedValue(mockResponse);
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const result = await provider.invokeModel(messages);
+
+      expect(result.metrics.success).toBe(true);
+      expect(result.message.content).toBe('Test response');
+      expect(mockLogger.warn).not.toHaveBeenCalled();
+    });
+
+    it('returns success=false for non-string content and logs warning', async () => {
+      const mockResponse = new AIMessage({ type: 'image', data: 'base64data' } as any);
+      mockLLM.invoke.mockResolvedValue(mockResponse);
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const result = await provider.invokeModel(messages);
+
+      expect(result.metrics.success).toBe(false);
+      expect(result.message.content).toBe('');
+      expect(mockLogger.warn).toHaveBeenCalledWith(
+        'Multimodal response not supported, expecting a string. Content type: object, Content:',
+        JSON.stringify({ type: 'image', data: 'base64data' }, null, 2),
+      );
+    });
+
+    it('returns success=false for array content and logs warning', async () => {
+      const mockResponse = new AIMessage(['text', { type: 'image', data: 'base64data' }] as any);
+      mockLLM.invoke.mockResolvedValue(mockResponse);
+
+      const messages = [{ role: 'user' as const, content: 'Hello' }];
+      const result = await provider.invokeModel(messages);
+
+      expect(result.metrics.success).toBe(false);
+      expect(result.message.content).toBe('');
+      expect(mockLogger.warn).toHaveBeenCalledWith(
+        'Multimodal response not supported, expecting a string. Content type: object, Content:',
+        JSON.stringify(['text', { type: 'image', data: 'base64data' }], null, 2),
+      );
+    });
+  });
+
   describe('mapProvider', () => {
     it('maps gemini to google-genai', () => {
       expect(LangChainProvider.mapProvider('gemini')).toBe('google-genai');

packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts

7 additions, 4 deletions

@@ -50,6 +50,9 @@ export class LangChainProvider extends AIProvider {
     // Get the LangChain response
     const response: AIMessage = await this._llm.invoke(langchainMessages);
 
+    // Generate metrics early (assumes success by default)
+    const metrics = LangChainProvider.createAIMetrics(response);
+
     // Extract text content from the response
     let content: string = '';
     if (typeof response.content === 'string') {
@@ -60,6 +63,8 @@
         `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
         JSON.stringify(response.content, null, 2),
       );
+      // Update metrics to reflect content loss
+      metrics.success = false;
     }
 
     // Create the assistant message
@@ -68,9 +73,6 @@
       content,
     };
 
-    // Extract metrics including token usage and success status
-    const metrics = LangChainProvider.createAIMetrics(response);
-
     return {
       message: assistantMessage,
       metrics,
@@ -108,6 +110,7 @@
    * This method extracts token usage information and success status from LangChain responses
    * and returns a LaunchDarkly AIMetrics object.
    *
+   * @param langChainResponse The response from the LangChain model
    * @example
    * ```typescript
    * // Use with tracker.trackMetricsOf for automatic tracking
@@ -129,7 +132,7 @@
       };
     }
 
-    // LangChain responses that complete successfully are considered successful
+    // LangChain responses that complete successfully are considered successful by default
     return {
       success: true,
       usage,
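
Taken together, the first three hunks reorder invokeModel so that the metrics object exists before content extraction and can be downgraded to success=false in the unsupported-content branch. A sketch of the resulting flow, assuming names inferred from the diff (the method signature, the convertMessagesToLangChain call site, the LDMessage/ChatResponse types, and the `_logger` field are reconstructions, not verbatim source):

    // Sketch of invokeModel after this commit; comments and core logic are
    // taken from the diff, the surrounding signature is inferred.
    async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
      // Convert LaunchDarkly-style messages to LangChain messages
      const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

      // Get the LangChain response
      const response: AIMessage = await this._llm.invoke(langchainMessages);

      // Generate metrics early (assumes success by default)
      const metrics = LangChainProvider.createAIMetrics(response);

      // Extract text content from the response
      let content: string = '';
      if (typeof response.content === 'string') {
        content = response.content;
      } else {
        this._logger?.warn(
          `Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
          JSON.stringify(response.content, null, 2),
        );
        // Update metrics to reflect content loss
        metrics.success = false;
      }

      // Create the assistant message
      const assistantMessage: LDMessage = { role: 'assistant', content };

      return { message: assistantMessage, metrics };
    }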
