Skip to content

Commit 2fa9741

chore(langchain_v1): rename model_request node -> model (#33310)

Parent: ba35387 · Commit: 2fa9741

File tree

3 files changed: +78 additions, −78 deletions

libs/langchain_v1/langchain/agents/factory.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ def create_agent( # noqa: PLR0915
219219
model: The language model for the agent. Can be a string identifier
220220
(e.g., ``"openai:gpt-4"``), a chat model instance (e.g., ``ChatOpenAI()``).
221221
tools: A list of tools, dicts, or callables. If ``None`` or an empty list,
222-
the agent will consist of a model_request node without a tool calling loop.
222+
the agent will consist of a model node without a tool calling loop.
223223
system_prompt: An optional system prompt for the LLM. If provided as a string,
224224
it will be converted to a SystemMessage and added to the beginning
225225
of the message list.
@@ -608,7 +608,7 @@ def _get_bound_model(request: ModelRequest) -> tuple[Runnable, ResponseFormat |
608608
)
609609
return request.model.bind(**request.model_settings), None
610610

611-
def model_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
611+
def model_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
612612
"""Sync model request handler with sequential middleware processing."""
613613
request = ModelRequest(
614614
model=model,
@@ -674,7 +674,7 @@ def model_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, An
674674
msg = f"Maximum retry attempts ({max_attempts}) exceeded"
675675
raise RuntimeError(msg)
676676

677-
async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
677+
async def amodel_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
678678
"""Async model request handler with sequential middleware processing."""
679679
request = ModelRequest(
680680
model=model,
@@ -724,7 +724,7 @@ async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[
724724
raise RuntimeError(msg)
725725

726726
# Use sync or async based on model capabilities
727-
graph.add_node("model_request", RunnableCallable(model_request, amodel_request, trace=False))
727+
graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))
728728

729729
# Only add tools node if we have tools
730730
if tool_node is not None:
@@ -808,27 +808,27 @@ async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[
808808
after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
809809
graph.add_node(f"{m.name}.after_agent", after_agent_node, input_schema=state_schema)
810810

811-
# Determine the entry node (runs once at start): before_agent -> before_model -> model_request
811+
# Determine the entry node (runs once at start): before_agent -> before_model -> model
812812
if middleware_w_before_agent:
813813
entry_node = f"{middleware_w_before_agent[0].name}.before_agent"
814814
elif middleware_w_before_model:
815815
entry_node = f"{middleware_w_before_model[0].name}.before_model"
816816
else:
817-
entry_node = "model_request"
817+
entry_node = "model"
818818

819819
# Determine the loop entry node (beginning of agent loop, excludes before_agent)
820820
# This is where tools will loop back to for the next iteration
821821
if middleware_w_before_model:
822822
loop_entry_node = f"{middleware_w_before_model[0].name}.before_model"
823823
else:
824-
loop_entry_node = "model_request"
824+
loop_entry_node = "model"
825825

826826
# Determine the loop exit node (end of each iteration, can run multiple times)
827-
# This is after_model or model_request, but NOT after_agent
827+
# This is after_model or model, but NOT after_agent
828828
if middleware_w_after_model:
829829
loop_exit_node = f"{middleware_w_after_model[0].name}.after_model"
830830
else:
831-
loop_exit_node = "model_request"
831+
loop_exit_node = "model"
832832

833833
# Determine the exit node (runs once at end): after_agent or END
834834
if middleware_w_after_agent:
@@ -860,7 +860,7 @@ async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[
860860
_make_model_to_model_edge(loop_entry_node, exit_node),
861861
[loop_entry_node, exit_node],
862862
)
863-
elif loop_exit_node == "model_request":
863+
elif loop_exit_node == "model":
864864
# If no tools and no after_model, go directly to exit_node
865865
graph.add_edge(loop_exit_node, exit_node)
866866
# No tools but we have after_model - connect after_model to exit_node
@@ -883,7 +883,7 @@ async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[
883883
loop_entry_node,
884884
can_jump_to=_get_can_jump_to(m1, "before_agent"),
885885
)
886-
# Connect last before_agent to loop_entry_node (before_model or model_request)
886+
# Connect last before_agent to loop_entry_node (before_model or model)
887887
_add_middleware_edge(
888888
graph,
889889
f"{middleware_w_before_agent[-1].name}.before_agent",
@@ -902,18 +902,18 @@ async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[
902902
loop_entry_node,
903903
can_jump_to=_get_can_jump_to(m1, "before_model"),
904904
)
905-
# Go directly to model_request after the last before_model
905+
# Go directly to model after the last before_model
906906
_add_middleware_edge(
907907
graph,
908908
f"{middleware_w_before_model[-1].name}.before_model",
909-
"model_request",
909+
"model",
910910
loop_entry_node,
911911
can_jump_to=_get_can_jump_to(middleware_w_before_model[-1], "before_model"),
912912
)
913913

914914
# Add after_model middleware edges
915915
if middleware_w_after_model:
916-
graph.add_edge("model_request", f"{middleware_w_after_model[-1].name}.after_model")
916+
graph.add_edge("model", f"{middleware_w_after_model[-1].name}.after_model")
917917
for idx in range(len(middleware_w_after_model) - 1, 0, -1):
918918
m1 = middleware_w_after_model[idx]
919919
m2 = middleware_w_after_model[idx - 1]

0 commit comments

Comments (0)