Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,66 @@ describe('LangChainProvider', () => {
expect(result.message.content).toBe('');
expect(mockLogger.warn).toHaveBeenCalledTimes(1);
});

it('returns success=false when model invocation throws an error', async () => {
  const invokeError = new Error('Model invocation failed');
  mockLLM.invoke.mockRejectedValue(invokeError);

  const result = await provider.invokeModel([{ role: 'user' as const, content: 'Hello' }]);

  // Failures must surface through metrics, not a thrown exception.
  expect(result.metrics.success).toBe(false);
  expect(result.message.role).toBe('assistant');
  expect(result.message.content).toBe('');
  expect(mockLogger.warn).toHaveBeenCalledWith('LangChain model invocation failed:', invokeError);
});
});

describe('invokeStructuredModel', () => {
  let mockLLM: any;
  let provider: LangChainProvider;

  beforeEach(() => {
    mockLLM = { withStructuredOutput: jest.fn() };
    provider = new LangChainProvider(mockLLM, mockLogger);
    jest.clearAllMocks();
  });

  it('returns success=true for successful invocation', async () => {
    const structuredData = { result: 'structured data' };
    const invokeMock = jest.fn().mockResolvedValue(structuredData);
    mockLLM.withStructuredOutput.mockReturnValue({ invoke: invokeMock });

    const schema = { type: 'object', properties: {} };
    const result = await provider.invokeStructuredModel(
      [{ role: 'user' as const, content: 'Hello' }],
      schema,
    );

    expect(result.metrics.success).toBe(true);
    expect(result.data).toEqual(structuredData);
    expect(result.rawResponse).toBe(JSON.stringify(structuredData));
    expect(mockLogger.warn).not.toHaveBeenCalled();
  });

  it('returns success=false when structured model invocation throws an error', async () => {
    const failure = new Error('Structured invocation failed');
    mockLLM.withStructuredOutput.mockReturnValue({
      invoke: jest.fn().mockRejectedValue(failure),
    });

    const schema = { type: 'object', properties: {} };
    const result = await provider.invokeStructuredModel(
      [{ role: 'user' as const, content: 'Hello' }],
      schema,
    );

    // Error path must yield an empty-but-well-formed StructuredResponse.
    expect(result.metrics.success).toBe(false);
    expect(result.data).toEqual({});
    expect(result.rawResponse).toBe('');
    expect(result.metrics.usage).toEqual({ total: 0, input: 0, output: 0 });
    expect(mockLogger.warn).toHaveBeenCalledWith(
      'LangChain structured model invocation failed:',
      failure,
    );
  });
});

describe('mapProvider', () => {
Expand Down
126 changes: 95 additions & 31 deletions packages/ai-providers/server-ai-langchain/src/LangChainProvider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import type {
LDLogger,
LDMessage,
LDTokenUsage,
StructuredResponse,
} from '@launchdarkly/server-sdk-ai';

/**
Expand Down Expand Up @@ -44,39 +45,102 @@ export class LangChainProvider extends AIProvider {
* Invoke the LangChain model with an array of messages.
*/
async invokeModel(messages: LDMessage[]): Promise<ChatResponse> {
// Convert LDMessage[] to LangChain messages
const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

// Get the LangChain response
const response: AIMessage = await this._llm.invoke(langchainMessages);

// Generate metrics early (assumes success by default)
const metrics = LangChainProvider.getAIMetricsFromResponse(response);

// Extract text content from the response
let content: string = '';
if (typeof response.content === 'string') {
content = response.content;
} else {
// Log warning for non-string content (likely multimodal)
this.logger?.warn(
`Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
JSON.stringify(response.content, null, 2),
);
// Update metrics to reflect content loss
metrics.success = false;
try {
// Convert LDMessage[] to LangChain messages
const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

// Get the LangChain response
const response: AIMessage = await this._llm.invoke(langchainMessages);

// Generate metrics early (assumes success by default)
const metrics = LangChainProvider.getAIMetricsFromResponse(response);

// Extract text content from the response
let content: string = '';
if (typeof response.content === 'string') {
content = response.content;
} else {
// Log warning for non-string content (likely multimodal)
this.logger?.warn(
`Multimodal response not supported, expecting a string. Content type: ${typeof response.content}, Content:`,
JSON.stringify(response.content, null, 2),
);
// Update metrics to reflect content loss
metrics.success = false;
}

// Create the assistant message
const assistantMessage: LDMessage = {
role: 'assistant',
content,
};

return {
message: assistantMessage,
metrics,
};
} catch (error) {
this.logger?.warn('LangChain model invocation failed:', error);

return {
message: {
role: 'assistant',
content: '',
},
metrics: {
success: false,
},
};
}
}

// Create the assistant message
const assistantMessage: LDMessage = {
role: 'assistant',
content,
};
/**
 * Invoke the LangChain model with structured output support.
 *
 * @param messages Conversation history to send to the model.
 * @param responseStructure Schema-like description of the expected output,
 *   passed straight to LangChain's `withStructuredOutput`.
 * @returns A StructuredResponse. On failure `data` is `{}`, `rawResponse` is
 *   `''`, and `metrics.success` is false — this method never throws. Token
 *   usage is always reported as zero because the structured-output path does
 *   not expose usage metadata.
 */
async invokeStructuredModel(
  messages: LDMessage[],
  responseStructure: Record<string, unknown>,
): Promise<StructuredResponse> {
  try {
    // Convert LDMessage[] to LangChain messages
    const langchainMessages = LangChainProvider.convertMessagesToLangChain(messages);

    // Get the LangChain response
    const response = await this._llm
      .withStructuredOutput(responseStructure)
      .invoke(langchainMessages);

    // Using structured output doesn't support metrics
    const metrics = {
      success: true,
      usage: {
        total: 0,
        input: 0,
        output: 0,
      },
    };

    return {
      data: response,
      rawResponse: JSON.stringify(response),
      metrics,
    };
  } catch (error) {
    // Report the failure through metrics so callers always receive a
    // well-formed StructuredResponse instead of having to catch exceptions.
    this.logger?.warn('LangChain structured model invocation failed:', error);

    return {
      data: {},
      rawResponse: '',
      metrics: {
        success: false,
        usage: {
          total: 0,
          input: 0,
          output: 0,
        },
      },
    };
  }
}

/**
Expand Down Expand Up @@ -191,8 +255,8 @@ export class LangChainProvider extends AIProvider {

// Use LangChain's universal initChatModel to support multiple providers
return initChatModel(modelName, {
modelProvider: LangChainProvider.mapProvider(provider),
...parameters,
modelProvider: LangChainProvider.mapProvider(provider),
});
}
}