Commit 52b1516

style(langchain): fix some middleware ref syntax (#33988)
1 parent: 8a3bb73

14 files changed: +530 −411 lines changed

libs/langchain_v1/langchain/agents/middleware/file_search.py

Lines changed: 2 additions & 2 deletions
@@ -120,9 +120,9 @@ def __init__(

        Args:
            root_path: Root directory to search.
-           use_ripgrep: Whether to use ripgrep for search.
+           use_ripgrep: Whether to use `ripgrep` for search.

-               Falls back to Python if ripgrep unavailable.
+               Falls back to Python if `ripgrep` unavailable.
            max_file_size_mb: Maximum file size to search in MB.
        """
        self.root_path = Path(root_path).resolve()
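
For context, the parameters documented in this hunk would be passed along these lines. This is only a sketch: the class name `FileSearchMiddleware` and the import path are assumed from the file location, not shown in the diff, and the argument values are illustrative.

```python
# Hypothetical usage sketch; class name and import path are assumptions
# inferred from libs/langchain_v1/langchain/agents/middleware/file_search.py.
from langchain.agents.middleware.file_search import FileSearchMiddleware

file_search = FileSearchMiddleware(
    root_path=".",        # root directory to search
    use_ripgrep=True,     # falls back to a pure-Python search if ripgrep is unavailable
    max_file_size_mb=10,  # maximum file size to search, in MB
)
```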

libs/langchain_v1/langchain/agents/middleware/model_call_limit.py

Lines changed: 1 addition & 0 deletions
@@ -133,6 +133,7 @@ def __init__(

                `None` means no limit.
            exit_behavior: What to do when limits are exceeded.
+
                - `'end'`: Jump to the end of the agent execution and
                    inject an artificial AI message indicating that the limit was
                    exceeded.
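
For reference, a minimal sketch of how the `exit_behavior` option documented above might be wired up. The class name `ModelCallLimitMiddleware` and the `thread_limit` parameter are assumptions inferred from the module name and surrounding docstring; only `exit_behavior="end"` and "None means no limit" appear in this hunk.

```python
# Hypothetical sketch; class and parameter names are assumptions, not confirmed by this diff.
from langchain.agents.middleware.model_call_limit import ModelCallLimitMiddleware
from langchain.agents import create_agent

limiter = ModelCallLimitMiddleware(
    thread_limit=10,      # assumed parameter: max model calls per thread, None means no limit
    exit_behavior="end",  # jump to the end and inject an artificial AI message when exceeded
)

agent = create_agent("openai:gpt-4o", middleware=[limiter])
```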

libs/langchain_v1/langchain/agents/middleware/shell_tool.py

Lines changed: 1 addition & 1 deletion
@@ -389,7 +389,7 @@ def __init__(
        shell_command: Sequence[str] | str | None = None,
        env: Mapping[str, Any] | None = None,
    ) -> None:
-       """Initialize the middleware.
+       """Initialize an instance of `ShellToolMiddleware`.

        Args:
            workspace_root: Base directory for the shell session.
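
The rename above only touches the docstring summary line. Based on the signature visible in the hunk, construction would look roughly like this; the import path and the concrete argument values are assumptions, not part of the diff.

```python
# Sketch based on the __init__ signature shown above; import path assumed from the file path.
from langchain.agents.middleware.shell_tool import ShellToolMiddleware

shell = ShellToolMiddleware(
    workspace_root="/tmp/agent-workspace",  # base directory for the shell session
    shell_command="/bin/bash",              # may also be a sequence of args, or None for a default shell
    env={"PATH": "/usr/bin:/bin"},          # environment passed to the session (illustrative)
)
```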

libs/langchain_v1/langchain/agents/middleware/summarization.py

Lines changed: 20 additions & 11 deletions
@@ -87,18 +87,27 @@ def __init__(

        Args:
            model: The language model to use for generating summaries.
-           trigger: One or more thresholds that trigger summarization. Provide a single
-               `ContextSize` tuple or a list of tuples, in which case summarization runs
-               when any threshold is breached. Examples: `("messages", 50)`, `("tokens", 3000)`,
-               `[("fraction", 0.8), ("messages", 100)]`.
-           keep: Context retention policy applied after summarization. Provide a
-               `ContextSize` tuple to specify how much history to preserve. Defaults to
-               keeping the most recent 20 messages. Examples: `("messages", 20)`,
-               `("tokens", 3000)`, or `("fraction", 0.3)`.
+           trigger: One or more thresholds that trigger summarization.
+
+               Provide a single `ContextSize` tuple or a list of tuples, in which case
+               summarization runs when any threshold is breached.
+
+               Examples: `("messages", 50)`, `("tokens", 3000)`, `[("fraction", 0.8),
+               ("messages", 100)]`.
+           keep: Context retention policy applied after summarization.
+
+               Provide a `ContextSize` tuple to specify how much history to preserve.
+
+               Defaults to keeping the most recent 20 messages.
+
+               Examples: `("messages", 20)`, `("tokens", 3000)`, or
+               `("fraction", 0.3)`.
            token_counter: Function to count tokens in messages.
            summary_prompt: Prompt template for generating summaries.
-           trim_tokens_to_summarize: Maximum tokens to keep when preparing messages for the
-               summarization call. Pass `None` to skip trimming entirely.
+           trim_tokens_to_summarize: Maximum tokens to keep when preparing messages for
+               the summarization call.
+
+               Pass `None` to skip trimming entirely.
        """
        # Handle deprecated parameters
        if "max_tokens_before_summary" in deprecated_kwargs:

@@ -354,7 +363,7 @@ def _find_safe_cutoff(self, messages: list[AnyMessage], messages_to_keep: int) -
        """Find safe cutoff point that preserves AI/Tool message pairs.

        Returns the index where messages can be safely cut without separating
-       related AI and Tool messages. Returns 0 if no safe cutoff is found.
+       related AI and Tool messages. Returns `0` if no safe cutoff is found.
        """
        if len(messages) <= messages_to_keep:
            return 0
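
The reflowed docstring above documents `trigger` and `keep` as `ContextSize` tuples. A minimal configuration sketch using those documented shapes follows; the class name `SummarizationMiddleware`, the import path, and the model string are assumptions inferred from the module, not shown in this hunk.

```python
# Sketch of the trigger/keep options documented above; names outside the docstring are assumptions.
from langchain.agents.middleware.summarization import SummarizationMiddleware

summarizer = SummarizationMiddleware(
    model="openai:gpt-4o-mini",                      # model used to generate summaries (illustrative)
    trigger=[("fraction", 0.8), ("messages", 100)],  # summarize when any threshold is breached
    keep=("messages", 20),                           # retain the 20 most recent messages afterwards
    trim_tokens_to_summarize=None,                   # None skips trimming before the summarization call
)
```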

libs/langchain_v1/langchain/agents/middleware/todo.py

Lines changed: 0 additions & 4 deletions
@@ -150,10 +150,6 @@ class TodoListMiddleware(AgentMiddleware):

        print(result["todos"])  # Array of todo items with status tracking
        ```
-
-   Args:
-       system_prompt: Custom system prompt to guide the agent on using the todo tool.
-       tool_description: Custom description for the write_todos tool.
    """

    state_schema = PlanningState
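
For context, the example that remains in the docstring reads `result["todos"]` off the agent result. A sketch of that flow, assuming the invocation pattern used by the other middleware examples in this commit; the import path and the message content are illustrative.

```python
# Sketch only; TodoListMiddleware is shown in the hunk header, the rest follows
# the create_agent/invoke pattern from the other docstrings in this diff.
from langchain_core.messages import HumanMessage

from langchain.agents import create_agent
from langchain.agents.middleware.todo import TodoListMiddleware

agent = create_agent("openai:gpt-4o", middleware=[TodoListMiddleware()])

result = agent.invoke({"messages": [HumanMessage("Plan a three-step task")]})
print(result["todos"])  # array of todo items with status tracking
```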

libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py

Lines changed: 36 additions & 27 deletions
@@ -153,38 +153,46 @@ class ToolCallLimitMiddleware(
    are other pending tool calls (due to parallel tool calling).

    Examples:
-       ```python title="Continue execution with blocked tools (default)"
-       from langchain.agents.middleware.tool_call_limit import ToolCallLimitMiddleware
-       from langchain.agents import create_agent
-
-       # Block exceeded tools but let other tools and model continue
-       limiter = ToolCallLimitMiddleware(
-           thread_limit=20,
-           run_limit=10,
-           exit_behavior="continue",  # default
-       )
+       !!! example "Continue execution with blocked tools (default)"
+
+           ```python
+           from langchain.agents.middleware.tool_call_limit import ToolCallLimitMiddleware
+           from langchain.agents import create_agent
+
+           # Block exceeded tools but let other tools and model continue
+           limiter = ToolCallLimitMiddleware(
+               thread_limit=20,
+               run_limit=10,
+               exit_behavior="continue",  # default
+           )

-       agent = create_agent("openai:gpt-4o", middleware=[limiter])
-       ```
+           agent = create_agent("openai:gpt-4o", middleware=[limiter])
+           ```

-       ```python title="Stop immediately when limit exceeded"
-       # End execution immediately with an AI message
-       limiter = ToolCallLimitMiddleware(run_limit=5, exit_behavior="end")
+       !!! example "Stop immediately when limit exceeded"

-       agent = create_agent("openai:gpt-4o", middleware=[limiter])
-       ```
+           ```python
+           # End execution immediately with an AI message
+           limiter = ToolCallLimitMiddleware(run_limit=5, exit_behavior="end")

-       ```python title="Raise exception on limit"
-       # Strict limit with exception handling
-       limiter = ToolCallLimitMiddleware(tool_name="search", thread_limit=5, exit_behavior="error")
+           agent = create_agent("openai:gpt-4o", middleware=[limiter])
+           ```

-       agent = create_agent("openai:gpt-4o", middleware=[limiter])
+       !!! example "Raise exception on limit"

-       try:
-           result = await agent.invoke({"messages": [HumanMessage("Task")]})
-       except ToolCallLimitExceededError as e:
-           print(f"Search limit exceeded: {e}")
-       ```
+           ```python
+           # Strict limit with exception handling
+           limiter = ToolCallLimitMiddleware(
+               tool_name="search", thread_limit=5, exit_behavior="error"
+           )
+
+           agent = create_agent("openai:gpt-4o", middleware=[limiter])
+
+           try:
+               result = await agent.invoke({"messages": [HumanMessage("Task")]})
+           except ToolCallLimitExceededError as e:
+               print(f"Search limit exceeded: {e}")
+           ```

    """

@@ -208,6 +216,7 @@ def __init__(
            run_limit: Maximum number of tool calls allowed per run.
                `None` means no limit.
            exit_behavior: How to handle when limits are exceeded.
+
                - `'continue'`: Block exceeded tools with error messages, let other
                    tools continue. Model decides when to end.
                - `'error'`: Raise a `ToolCallLimitExceededError` exception

@@ -218,7 +227,7 @@ def __init__(

        Raises:
            ValueError: If both limits are `None`, if `exit_behavior` is invalid,
-               or if `run_limit` exceeds thread_limit.
+               or if `run_limit` exceeds `thread_limit`.
        """
        super().__init__()

libs/langchain_v1/langchain/agents/middleware/tool_emulator.py

Lines changed: 29 additions & 21 deletions
@@ -25,34 +25,42 @@ class LLMToolEmulator(AgentMiddleware):
    This middleware allows selective emulation of tools for testing purposes.

    By default (when `tools=None`), all tools are emulated. You can specify which
-   tools to emulate by passing a list of tool names or BaseTool instances.
+   tools to emulate by passing a list of tool names or `BaseTool` instances.

    Examples:
-       ```python title="Emulate all tools (default behavior)"
-       from langchain.agents.middleware import LLMToolEmulator
+       !!! example "Emulate all tools (default behavior)"

-       middleware = LLMToolEmulator()
+           ```python
+           from langchain.agents.middleware import LLMToolEmulator

-       agent = create_agent(
-           model="openai:gpt-4o",
-           tools=[get_weather, get_user_location, calculator],
-           middleware=[middleware],
-       )
-       ```
+           middleware = LLMToolEmulator()

-       ```python title="Emulate specific tools by name"
-       middleware = LLMToolEmulator(tools=["get_weather", "get_user_location"])
-       ```
+           agent = create_agent(
+               model="openai:gpt-4o",
+               tools=[get_weather, get_user_location, calculator],
+               middleware=[middleware],
+           )
+           ```

-       ```python title="Use a custom model for emulation"
-       middleware = LLMToolEmulator(
-           tools=["get_weather"], model="anthropic:claude-sonnet-4-5-20250929"
-       )
-       ```
+       !!! example "Emulate specific tools by name"
+
+           ```python
+           middleware = LLMToolEmulator(tools=["get_weather", "get_user_location"])
+           ```
+
+       !!! example "Use a custom model for emulation"
+
+           ```python
+           middleware = LLMToolEmulator(
+               tools=["get_weather"], model="anthropic:claude-sonnet-4-5-20250929"
+           )
+           ```
+
+       !!! example "Emulate specific tools by passing tool instances"

-       ```python title="Emulate specific tools by passing tool instances"
-       middleware = LLMToolEmulator(tools=[get_weather, get_user_location])
-       ```
+           ```python
+           middleware = LLMToolEmulator(tools=[get_weather, get_user_location])
+           ```
    """

    def __init__(
