diff --git a/.changeset/fancy-breads-allow.md b/.changeset/fancy-breads-allow.md
new file mode 100644
index 000000000000..f81555e0c215
--- /dev/null
+++ b/.changeset/fancy-breads-allow.md
@@ -0,0 +1,5 @@
+---
+"langchain": patch
+---
+
+avoid invalid message order after summarization
diff --git a/libs/langchain/src/agents/middleware/summarization.ts b/libs/langchain/src/agents/middleware/summarization.ts
index 0844dafa0f0b..5ab1f0e06a96 100644
--- a/libs/langchain/src/agents/middleware/summarization.ts
+++ b/libs/langchain/src/agents/middleware/summarization.ts
@@ -316,6 +316,15 @@ function isSafeCutoffPoint(
     return true;
   }
 
+  // Prevent preserved messages from starting with AI message containing tool calls
+  if (
+    cutoffIndex < messages.length &&
+    AIMessage.isInstance(messages[cutoffIndex]) &&
+    hasToolCalls(messages[cutoffIndex])
+  ) {
+    return false;
+  }
+
   const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);
   const searchEnd = Math.min(
     messages.length,
diff --git a/libs/langchain/src/agents/middleware/tests/summarization.test.ts b/libs/langchain/src/agents/middleware/tests/summarization.test.ts
index bc0be4a4d6d8..c5d330154523 100644
--- a/libs/langchain/src/agents/middleware/tests/summarization.test.ts
+++ b/libs/langchain/src/agents/middleware/tests/summarization.test.ts
@@ -10,6 +10,7 @@ import {
 import { summarizationMiddleware } from "../summarization.js";
 import { countTokensApproximately } from "../utils.js";
 import { createAgent } from "../../index.js";
+import { hasToolCalls } from "../../utils.js";
 import { FakeToolCallingChatModel } from "../../tests/utils.js";
 
 // Mock @langchain/anthropic to test model string usage without requiring the built package
@@ -379,8 +380,68 @@ describe("summarizationMiddleware", () => {
       model,
       middleware: [middleware],
     });
     const result = await agent.invoke({ messages: [] });
     expect(result.messages.at(-1)?.content).toBe("Mocked response");
   });
 
+
+  it("should not start preserved messages with AI message containing tool calls", async () => {
+    const summarizationModel = createMockSummarizationModel();
+    const model = createMockMainModel();
+
+    const middleware = summarizationMiddleware({
+      model: summarizationModel as any,
+      maxTokensBeforeSummary: 50, // Very low threshold to trigger summarization
+      messagesToKeep: 2, // Keep very few messages to force problematic cutoff
+    });
+
+    const agent = createAgent({
+      model,
+      middleware: [middleware],
+    });
+
+    // Create a conversation history that would cause the problematic scenario
+    // We need messages where an AI message with tool calls would be the first preserved message
+    // after summarization if the cutoff isn't adjusted properly
+    const messages = [
+      new HumanMessage(
+        `First message with some content to take up tokens. ${"x".repeat(100)}`
+      ),
+      new AIMessage(`First response. ${"x".repeat(100)}`),
+      new HumanMessage(
+        `Second message with more content to build up tokens. ${"x".repeat(
+          100
+        )}`
+      ),
+      new AIMessage(`Second response. ${"x".repeat(100)}`),
+      // This AI message with tool calls should NOT be the first preserved message
+      new AIMessage({
+        content: "Let me search for information.",
+        tool_calls: [{ id: "call_1", name: "search", args: { query: "test" } }],
+      }),
+      new ToolMessage({
+        content: "Search results",
+        tool_call_id: "call_1",
+      }),
+      new HumanMessage("What did you find?"),
+    ];
+
+    const result = await agent.invoke({ messages });
+
+    // Verify summarization occurred
+    expect(result.messages[0]).toBeInstanceOf(SystemMessage);
+    const systemPrompt = result.messages[0] as SystemMessage;
+    expect(systemPrompt.content).toContain("## Previous conversation summary:");
+
+    // Verify preserved messages don't start with AI(tool calls)
+    const preservedMessages = result.messages.filter(
+      (m) => !SystemMessage.isInstance(m)
+    );
+    expect(preservedMessages.length).toBeGreaterThan(0);
+    const firstPreserved = preservedMessages[0];
+    // The first preserved message should not be an AI message with tool calls
+    expect(
+      !(AIMessage.isInstance(firstPreserved) && hasToolCalls(firstPreserved))
+    ).toBe(true);
+  });
 });