Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,66 @@ describe('LangChainProvider', () => {
expect(result.message.content).toBe('');
expect(mockLogger.warn).toHaveBeenCalledTimes(1);
});

it('returns success=false when model invocation throws an error', async () => {
  // Simulate the underlying model rejecting the invocation outright.
  const invokeError = new Error('Model invocation failed');
  mockLLM.invoke.mockRejectedValue(invokeError);

  const result = await provider.invokeModel([{ role: 'user' as const, content: 'Hello' }]);

  // The provider degrades gracefully: empty assistant message, failed metrics,
  // and the failure is logged at error level with the original cause.
  expect(result.message.role).toBe('assistant');
  expect(result.message.content).toBe('');
  expect(result.metrics.success).toBe(false);
  expect(mockLogger.error).toHaveBeenCalledWith('LangChain model invocation failed:', invokeError);
});
});

describe('invokeStructuredModel', () => {
let mockLLM: any;
let provider: LangChainProvider;

beforeEach(() => {
mockLLM = {
withStructuredOutput: jest.fn(),
};
provider = new LangChainProvider(mockLLM, mockLogger);
jest.clearAllMocks();
});

it('returns success=true for successful invocation', async () => {
const mockResponse = { result: 'structured data' };
const mockInvoke = jest.fn().mockResolvedValue(mockResponse);
mockLLM.withStructuredOutput.mockReturnValue({ invoke: mockInvoke });

const messages = [{ role: 'user' as const, content: 'Hello' }];
const responseStructure = { type: 'object', properties: {} };
const result = await provider.invokeStructuredModel(messages, responseStructure);

expect(result.metrics.success).toBe(true);
expect(result.data).toEqual(mockResponse);
expect(result.rawResponse).toBe(JSON.stringify(mockResponse));
expect(mockLogger.error).not.toHaveBeenCalled();
});

it('returns success=false when structured model invocation throws an error', async () => {
const error = new Error('Structured invocation failed');
const mockInvoke = jest.fn().mockRejectedValue(error);
mockLLM.withStructuredOutput.mockReturnValue({ invoke: mockInvoke });

const messages = [{ role: 'user' as const, content: 'Hello' }];
const responseStructure = { type: 'object', properties: {} };
const result = await provider.invokeStructuredModel(messages, responseStructure);

expect(result.metrics.success).toBe(false);
expect(result.data).toEqual({});
expect(result.rawResponse).toBe('');
expect(result.metrics.usage).toEqual({ total: 0, input: 0, output: 0 });
expect(mockLogger.error).toHaveBeenCalledWith(
'LangChain structured model invocation failed:',
error,
);
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bug: Mismatched Logging Levels for Model Invocation Errors

The invokeModel and invokeStructuredModel methods in LangChainProvider.ts log caught errors using logger.warn(), but their corresponding tests expect logger.error(). This mismatch causes test failures and suggests an incorrect severity classification for these error conditions.

Additional Locations (3)

Fix in Cursor Fix in Web

});
});

describe('mapProvider', () => {
Expand Down
4 changes: 2 additions & 2 deletions packages/ai-providers/server-ai-langchain/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@langchain/core": "^0.3.0",
"@launchdarkly/server-sdk-ai": "^0.12.3",
"@launchdarkly/server-sdk-ai": "^0.12.0",
"@trivago/prettier-plugin-sort-imports": "^4.1.1",
"@types/jest": "^29.5.3",
"@typescript-eslint/eslint-plugin": "^6.20.0",
Expand All @@ -48,7 +48,7 @@
},
"peerDependencies": {
"@langchain/core": "^0.2.0 || ^0.3.0",
"@launchdarkly/server-sdk-ai": "^0.12.2",
"@launchdarkly/server-sdk-ai": "^0.12.0",
"langchain": "^0.2.0 || ^0.3.0"
}
}
124 changes: 94 additions & 30 deletions packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import type {
LDLogger,
LDMessage,
LDTokenUsage,
StructuredResponse,
} from '@launchdarkly/server-sdk-ai';

/**
Expand Down Expand Up @@ -44,39 +45,102 @@ export class LangChainProvider extends AIProvider {
* Invoke the LangChain model with an array of messages.
*/
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
// Convert LDMessage[] to LangChain messages
const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

// Get the LangChain response
const response: AIMessage = await this._llm.invoke(langchainMessages);

// Generate metrics early (assumes success by default)
const metrics = LangChainProvider.createAIMetrics(response);

// Extract text content from the response
let content: string = '';
if (typeof response.content === 'string') {
content = response.content;
} else {
// Log warning for non-string content (likely multimodal)
this.logger?.warn(
`Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
JSON.stringify(response.content, null, 2),
);
// Update metrics to reflect content loss
metrics.success = false;
try {
// Convert LDMessage[] to LangChain messages
const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

// Get the LangChain response
const response: AIMessage = await this._llm.invoke(langchainMessages);

// Generate metrics early (assumes success by default)
const metrics = LangChainProvider.createAIMetrics(response);

// Extract text content from the response
let content: string = '';
if (typeof response.content === 'string') {
content = response.content;
} else {
// Log warning for non-string content (likely multimodal)
this.logger?.warn(
`Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
JSON.stringify(response.content, null, 2),
);
// Update metrics to reflect content loss
metrics.success = false;
}

// Create the assistant message
const assistantMessage: LDMessage = {
role: 'assistant',
content,
};

return {
message: assistantMessage,
metrics,
};
} catch (error) {
this.logger?.warn('LangChain model invocation failed:', error);

return {
message: {
role: 'assistant',
content: '',
},
metrics: {
success: false,
},
};
}
}

// Create the assistant message
const assistantMessage: LDMessage = {
role: 'assistant',
content,
};
/**
 * Invoke the LangChain model with structured output support.
 *
 * @param messages Conversation history to send to the model.
 * @param responseStructure JSON-schema-like description of the expected output.
 * @returns The structured data, its JSON serialization, and call metrics.
 */
async invokeStructuredModel(
  messages: LDMessage[],
  responseStructure: Record<string, unknown>,
): Promise<StructuredResponse> {
  try {
    // Convert LDMessage[] to LangChain messages.
    const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

    // Invoke the model through LangChain's structured-output wrapper.
    const response = await this._llm
      .withStructuredOutput(responseStructure)
      .invoke(langchainMessages);

    // Structured output does not surface token usage, so report zeros.
    const metrics = {
      success: true,
      usage: {
        total: 0,
        input: 0,
        output: 0,
      },
    };

    return {
      data: response,
      rawResponse: JSON.stringify(response),
      metrics,
    };
  } catch (error) {
    // An invocation failure is an error, not a warning: log at error level
    // (the unit tests assert logger.error) and return an empty result with
    // success=false instead of propagating the exception.
    this.logger?.error('LangChain structured model invocation failed:', error);

    return {
      data: {},
      rawResponse: '',
      metrics: {
        success: false,
        usage: {
          total: 0,
          input: 0,
          output: 0,
        },
      },
    };
  }
}

/**
Expand Down
Loading