Skip to content

Commit b7d1831

Browse files
fix: deprecate setattr on ModelRequest (#34022)
* one alternative considered was setting `frozen=True` on the dataclass, but that would be a breaking change, so a deprecation warning is a nicer approach
1 parent 328ba36 commit b7d1831

File tree

16 files changed

+876
-621
lines changed

16 files changed

+876
-621
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,3 +163,6 @@ node_modules
163163

164164
prof
165165
virtualenv/
166+
scratch/
167+
168+
.langgraph_api/

libs/langchain_v1/langchain/agents/middleware/context_editing.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from __future__ import annotations
1111

1212
from collections.abc import Awaitable, Callable, Iterable, Sequence
13+
from copy import deepcopy
1314
from dataclasses import dataclass
1415
from typing import Literal
1516

@@ -238,10 +239,11 @@ def count_tokens(messages: Sequence[BaseMessage]) -> int:
238239
system_msg + list(messages), request.tools
239240
)
240241

242+
edited_messages = deepcopy(list(request.messages))
241243
for edit in self.edits:
242-
edit.apply(request.messages, count_tokens=count_tokens)
244+
edit.apply(edited_messages, count_tokens=count_tokens)
243245

244-
return handler(request)
246+
return handler(request.override(messages=edited_messages))
245247

246248
async def awrap_model_call(
247249
self,
@@ -266,10 +268,11 @@ def count_tokens(messages: Sequence[BaseMessage]) -> int:
266268
system_msg + list(messages), request.tools
267269
)
268270

271+
edited_messages = deepcopy(list(request.messages))
269272
for edit in self.edits:
270-
edit.apply(request.messages, count_tokens=count_tokens)
273+
edit.apply(edited_messages, count_tokens=count_tokens)
271274

272-
return await handler(request)
275+
return await handler(request.override(messages=edited_messages))
273276

274277

275278
__all__ = [

libs/langchain_v1/langchain/agents/middleware/model_fallback.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -92,9 +92,8 @@ def wrap_model_call(
9292

9393
# Try fallback models
9494
for fallback_model in self.models:
95-
request.model = fallback_model
9695
try:
97-
return handler(request)
96+
return handler(request.override(model=fallback_model))
9897
except Exception as e: # noqa: BLE001
9998
last_exception = e
10099
continue
@@ -127,9 +126,8 @@ async def awrap_model_call(
127126

128127
# Try fallback models
129128
for fallback_model in self.models:
130-
request.model = fallback_model
131129
try:
132-
return await handler(request)
130+
return await handler(request.override(model=fallback_model))
133131
except Exception as e: # noqa: BLE001
134132
last_exception = e
135133
continue

libs/langchain_v1/langchain/agents/middleware/todo.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -194,22 +194,22 @@ def wrap_model_call(
194194
handler: Callable[[ModelRequest], ModelResponse],
195195
) -> ModelCallResult:
196196
"""Update the system prompt to include the todo system prompt."""
197-
request.system_prompt = (
197+
new_system_prompt = (
198198
request.system_prompt + "\n\n" + self.system_prompt
199199
if request.system_prompt
200200
else self.system_prompt
201201
)
202-
return handler(request)
202+
return handler(request.override(system_prompt=new_system_prompt))
203203

204204
async def awrap_model_call(
205205
self,
206206
request: ModelRequest,
207207
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
208208
) -> ModelCallResult:
209209
"""Update the system prompt to include the todo system prompt (async version)."""
210-
request.system_prompt = (
210+
new_system_prompt = (
211211
request.system_prompt + "\n\n" + self.system_prompt
212212
if request.system_prompt
213213
else self.system_prompt
214214
)
215-
return await handler(request)
215+
return await handler(request.override(system_prompt=new_system_prompt))

libs/langchain_v1/langchain/agents/middleware/tool_selection.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -255,8 +255,7 @@ def _process_selection_response(
255255
# Also preserve any provider-specific tool dicts from the original request
256256
provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]
257257

258-
request.tools = [*selected_tools, *provider_tools]
259-
return request
258+
return request.override(tools=[*selected_tools, *provider_tools])
260259

261260
def wrap_model_call(
262261
self,

libs/langchain_v1/langchain/agents/middleware/types.py

Lines changed: 45 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,31 @@ class ModelRequest:
9494
runtime: Runtime[ContextT] # type: ignore[valid-type]
9595
model_settings: dict[str, Any] = field(default_factory=dict)
9696

97+
def __setattr__(self, name: str, value: Any) -> None:
98+
"""Set an attribute with a deprecation warning.
99+
100+
Direct attribute assignment on `ModelRequest` is deprecated. Use the
101+
`override()` method instead to create a new request with modified attributes.
102+
103+
Args:
104+
name: Attribute name.
105+
value: Attribute value.
106+
"""
107+
import warnings
108+
109+
# Allow setting attributes during __init__ (when object is being constructed)
110+
if not hasattr(self, "__dataclass_fields__") or not hasattr(self, name):
111+
object.__setattr__(self, name, value)
112+
else:
113+
warnings.warn(
114+
f"Direct attribute assignment to ModelRequest.{name} is deprecated. "
115+
f"Use request.override({name}=...) instead to create a new request "
116+
f"with the modified attribute.",
117+
DeprecationWarning,
118+
stacklevel=2,
119+
)
120+
object.__setattr__(self, name, value)
121+
97122
def override(self, **overrides: Unpack[_ModelRequestOverrides]) -> ModelRequest:
98123
"""Replace the request with a new request with the given overrides.
99124
@@ -446,7 +471,14 @@ def wrap_tool_call(
446471
447472
```python
448473
def wrap_tool_call(self, request, handler):
449-
request.tool_call["args"]["value"] *= 2
474+
modified_call = {
475+
**request.tool_call,
476+
"args": {
477+
**request.tool_call["args"],
478+
"value": request.tool_call["args"]["value"] * 2,
479+
},
480+
}
481+
request = request.override(tool_call=modified_call)
450482
return handler(request)
451483
```
452484
@@ -1337,7 +1369,7 @@ async def async_wrapped(
13371369
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
13381370
) -> ModelCallResult:
13391371
prompt = await func(request) # type: ignore[misc]
1340-
request.system_prompt = prompt
1372+
request = request.override(system_prompt=prompt)
13411373
return await handler(request)
13421374

13431375
middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
@@ -1358,7 +1390,7 @@ def wrapped(
13581390
handler: Callable[[ModelRequest], ModelResponse],
13591391
) -> ModelCallResult:
13601392
prompt = cast("str", func(request))
1361-
request.system_prompt = prompt
1393+
request = request.override(system_prompt=prompt)
13621394
return handler(request)
13631395

13641396
async def async_wrapped_from_sync(
@@ -1368,7 +1400,7 @@ async def async_wrapped_from_sync(
13681400
) -> ModelCallResult:
13691401
# Delegate to sync function
13701402
prompt = cast("str", func(request))
1371-
request.system_prompt = prompt
1403+
request = request.override(system_prompt=prompt)
13721404
return await handler(request)
13731405

13741406
middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
@@ -1469,7 +1501,7 @@ def fallback_model(request, handler):
14691501
pass
14701502
14711503
# Try fallback model
1472-
request.model = fallback_model_instance
1504+
request = request.override(model=fallback_model_instance)
14731505
return handler(request)
14741506
```
14751507
@@ -1632,7 +1664,14 @@ async def async_retry(request, handler):
16321664
```python
16331665
@wrap_tool_call
16341666
def modify_args(request, handler):
1635-
request.tool_call["args"]["value"] *= 2
1667+
modified_call = {
1668+
**request.tool_call,
1669+
"args": {
1670+
**request.tool_call["args"],
1671+
"value": request.tool_call["args"]["value"] * 2,
1672+
},
1673+
}
1674+
request = request.override(tool_call=modified_call)
16361675
return handler(request)
16371676
```
16381677

libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_composition.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -230,9 +230,7 @@ def inner(request, handler):
230230
test_runtime = {"test": "runtime"}
231231

232232
# Create request with state and runtime
233-
test_request = create_test_request()
234-
test_request.state = test_state
235-
test_request.runtime = test_runtime
233+
test_request = create_test_request(state=test_state, runtime=test_runtime)
236234
result = composed(test_request, create_mock_base_handler())
237235

238236
# Both handlers should see same state and runtime

libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_decorators.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -90,8 +90,7 @@ def test_on_model_call_decorator() -> None:
9090

9191
@wrap_model_call(state_schema=CustomState, tools=[test_tool], name="CustomOnModelCall")
9292
def custom_on_model_call(request, handler):
93-
request.system_prompt = "Modified"
94-
return handler(request)
93+
return handler(request.override(system_prompt="Modified"))
9594

9695
# Verify all options were applied
9796
assert isinstance(custom_on_model_call, AgentMiddleware)
@@ -277,8 +276,7 @@ def test_async_on_model_call_decorator() -> None:
277276

278277
@wrap_model_call(state_schema=CustomState, tools=[test_tool], name="AsyncOnModelCall")
279278
async def async_on_model_call(request, handler):
280-
request.system_prompt = "Modified async"
281-
return await handler(request)
279+
return await handler(request.override(system_prompt="Modified async"))
282280

283281
assert isinstance(async_on_model_call, AgentMiddleware)
284282
assert async_on_model_call.state_schema == CustomState

libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_tools.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,8 @@ def wrap_model_call(
7979
handler: Callable[[ModelRequest], AIMessage],
8080
) -> AIMessage:
8181
# Only allow tool_a and tool_b
82-
request.tools = [t for t in request.tools if t.name in ["tool_a", "tool_b"]]
83-
return handler(request)
82+
filtered_tools = [t for t in request.tools if t.name in ["tool_a", "tool_b"]]
83+
return handler(request.override(tools=filtered_tools))
8484

8585
# Model will try to call tool_a
8686
model = FakeToolCallingModel(
@@ -123,8 +123,7 @@ def wrap_model_call(
123123
handler: Callable[[ModelRequest], AIMessage],
124124
) -> AIMessage:
125125
# Add an unknown tool
126-
request.tools = request.tools + [unknown_tool]
127-
return handler(request)
126+
return handler(request.override(tools=request.tools + [unknown_tool]))
128127

129128
agent = create_agent(
130129
model=FakeToolCallingModel(),
@@ -163,7 +162,8 @@ def wrap_model_call(
163162
) -> AIMessage:
164163
# Remove admin_tool if not admin
165164
if not request.state.get("is_admin", False):
166-
request.tools = [t for t in request.tools if t.name != "admin_tool"]
165+
filtered_tools = [t for t in request.tools if t.name != "admin_tool"]
166+
request = request.override(tools=filtered_tools)
167167
return handler(request)
168168

169169
model = FakeToolCallingModel()
@@ -200,7 +200,7 @@ def wrap_model_call(
200200
handler: Callable[[ModelRequest], AIMessage],
201201
) -> AIMessage:
202202
# Remove all tools
203-
request.tools = []
203+
request = request.override(tools=[])
204204
return handler(request)
205205

206206
model = FakeToolCallingModel()
@@ -244,7 +244,8 @@ def wrap_model_call(
244244
) -> AIMessage:
245245
modification_order.append([t.name for t in request.tools])
246246
# Remove tool_c
247-
request.tools = [t for t in request.tools if t.name != "tool_c"]
247+
filtered_tools = [t for t in request.tools if t.name != "tool_c"]
248+
request = request.override(tools=filtered_tools)
248249
return handler(request)
249250

250251
class SecondMiddleware(AgentMiddleware):
@@ -257,7 +258,8 @@ def wrap_model_call(
257258
# Should not see tool_c here
258259
assert all(t.name != "tool_c" for t in request.tools)
259260
# Remove tool_b
260-
request.tools = [t for t in request.tools if t.name != "tool_b"]
261+
filtered_tools = [t for t in request.tools if t.name != "tool_b"]
262+
request = request.override(tools=filtered_tools)
261263
return handler(request)
262264

263265
agent = create_agent(

0 commit comments

Comments
 (0)