Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/blue-humans-rhyme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"langchain": patch
---

fix(langchain): respect version:"v1" in afterModel router's pending tool call path
8 changes: 8 additions & 0 deletions libs/langchain/src/agents/ReactAgent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -919,6 +919,14 @@ export class ReactAgent<
(call) => !toolMessages.some((m) => m.tool_call_id === call.id)
);
if (pendingToolCalls && pendingToolCalls.length > 0) {
/**
* v1: route the full message to the ToolNode; it filters already-processed
* calls internally and runs the remaining ones via Promise.all.
* v2: dispatch each pending call as a separate Send task.
*/
if (this.#toolBehaviorVersion === "v1") {
return TOOLS_NODE_NAME;
}
return pendingToolCalls.map(
(toolCall) =>
new Send(TOOLS_NODE_NAME, { ...state, lg_tool_call: toolCall })
Expand Down
112 changes: 111 additions & 1 deletion libs/langchain/src/agents/tests/tools.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import { tool, ToolRuntime } from "@langchain/core/tools";
import { HumanMessage, ToolMessage } from "@langchain/core/messages";
import { InMemoryStore } from "@langchain/langgraph";

import { createAgent } from "../index.js";
import { createAgent, createMiddleware } from "../index.js";
import { FakeToolCallingModel } from "./utils.js";

describe("tools", () => {
Expand Down Expand Up @@ -69,4 +69,114 @@ describe("tools", () => {
"The weather in Tokyo is sunny. The foo is bar and the bar is baz."
);
});

describe("version: 'v1' — parallel tool execution via Promise.all", () => {
  /**
   * Shared fixture: a fake model that emits two tool calls on its first
   * turn and a plain-text reply on the second (ending the agent loop),
   * plus two spy tools that record the order they were invoked in.
   * A fresh fixture is built per spec so model/tool state never leaks.
   */
  function makeFixtures() {
    const callLog: string[] = [];

    const model = new FakeToolCallingModel({
      toolCalls: [
        [
          { type: "tool_call", name: "tool_a", args: {}, id: "call_a" },
          { type: "tool_call", name: "tool_b", args: {}, id: "call_b" },
        ],
        // second LLM turn returns a plain text response to end the agent loop
        [],
      ],
    });

    // Builds a no-arg spy tool that logs its own name and returns `output`.
    const makeSpyTool = (toolName: string, description: string, output: string) =>
      tool(
        async () => {
          callLog.push(toolName);
          return output;
        },
        { name: toolName, description, schema: z.object({}) }
      );

    return {
      callLog,
      model,
      toolA: makeSpyTool("tool_a", "tool a", "result_a"),
      toolB: makeSpyTool("tool_b", "tool b", "result_b"),
    };
  }

  it("runs all tool calls from a single AIMessage in the same ToolNode invocation", async () => {
    const { callLog, model, toolA, toolB } = makeFixtures();

    const agent = createAgent({
      model,
      tools: [toolA, toolB],
      version: "v1",
    });

    const result = await agent.invoke({
      messages: [new HumanMessage("run both tools")],
    });

    // Both tools must have been called
    expect(callLog).toContain("tool_a");
    expect(callLog).toContain("tool_b");

    // Both ToolMessages must appear in the conversation
    const toolMessages = result.messages.filter(ToolMessage.isInstance);
    expect(toolMessages).toHaveLength(2);
    expect(toolMessages.map((m) => m.tool_call_id).sort()).toEqual(
      ["call_a", "call_b"].sort()
    );
  });

  it("runs pending tool calls via ToolNode (not Send) when afterModel middleware is present", async () => {
    const { callLog, model, toolA, toolB } = makeFixtures();

    // afterModel middleware forces the #createAfterModelRouter code path
    const passthroughAfterModel = createMiddleware({
      name: "afterModelMiddleware",
      afterModel: async ({ messages }) => ({ messages }),
    });

    const agent = createAgent({
      model,
      tools: [toolA, toolB],
      middleware: [passthroughAfterModel],
      version: "v1",
    });

    const result = await agent.invoke({
      messages: [new HumanMessage("run both tools")],
    });

    expect(callLog).toContain("tool_a");
    expect(callLog).toContain("tool_b");

    const toolMessages = result.messages.filter(ToolMessage.isInstance);
    expect(toolMessages).toHaveLength(2);
    expect(toolMessages.map((m) => m.tool_call_id).sort()).toEqual(
      ["call_a", "call_b"].sort()
    );
  });
});
});
16 changes: 12 additions & 4 deletions libs/langchain/src/agents/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -805,10 +805,18 @@ export type CreateAgentParams<
* Determines the version of the graph to create.
*
* Can be one of
* - `"v1"`: The tool node processes a single message. All tool calls in the message are
* executed in parallel within the tool node.
* - `"v2"`: The tool node processes a single tool call. Tool calls are distributed across
* multiple instances of the tool node using the Send API.
* - `"v1"`: The tool node processes the full `AIMessage` containing all tool calls. All tool
* calls are executed concurrently via `Promise.all` inside a single graph node.
* **Choose v1** when your tools invoke sub-graphs or other long-running async work
* and you need true parallelism — the `Promise.all` approach is unaffected by
* LangGraph's per-task checkpoint serialisation.
*
* - `"v2"`: Each tool call is dispatched as an independent graph task using the Send API.
* Tasks are scheduled in parallel by LangGraph, but when tools invoke sub-graphs
* the underlying checkpoint writes can cause effective serialisation, making
* concurrent tool calls execute sequentially. v2 is the better choice when you
* need per-tool-call checkpointing, independent fault isolation, or `interrupt()`
* support inside individual tool calls.
*
* @default `"v2"`
*/
Expand Down
Loading