Commit acd1aa8
feat(langchain_v1): implement nicer devx for dynamic prompt (#33264)

Adds a `dynamic_prompt` decorator to support smoother devx for dynamic system prompts:

```py
from dataclasses import dataclass

from langchain.agents.middleware.types import dynamic_prompt, ModelRequest, AgentState
from langchain.agents.middleware_agent import create_agent
from langchain_core.messages import HumanMessage
from langgraph.runtime import Runtime


@dataclass
class Context:
    user_name: str


@dynamic_prompt
def my_prompt(request: ModelRequest, state: AgentState, runtime: Runtime[Context]) -> str:
    user_name = runtime.context.user_name
    return f"You are a helpful assistant helping {user_name}. Please refer to the user as {user_name}."


agent = create_agent(model="openai:gpt-4o", middleware=[my_prompt]).compile()

result = agent.invoke(
    {"messages": [HumanMessage("Hello")]},
    context=Context(user_name="Sydney"),
)

for msg in result["messages"]:
    msg.pretty_print()

"""
================================ Human Message =================================

Hello

================================== Ai Message ==================================

Hello Sydney! How can I assist you today?
"""
```
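The decorator also accepts coroutine functions, which the `types.py` diff below wires to `amodify_model_request`, so the same pattern works on the async path. A minimal sketch, reusing `Context` and the imports above (run inside an async context):

```py
@dynamic_prompt
async def my_async_prompt(
    request: ModelRequest, state: AgentState, runtime: Runtime[Context]
) -> str:
    # Coroutine functions are detected and registered as the async hook.
    return f"You are a helpful assistant helping {runtime.context.user_name}."


agent = create_agent(model="openai:gpt-4o", middleware=[my_async_prompt]).compile()
result = await agent.ainvoke(
    {"messages": [HumanMessage("Hello")]},
    context=Context(user_name="Sydney"),
)
```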
1 parent 2671fee commit acd1aa8

3 files changed: +279 -0 lines changed

libs/langchain_v1/langchain/agents/middleware/__init__.py

Lines changed: 2 additions & 0 deletions

```diff
@@ -10,6 +10,7 @@
     ModelRequest,
     after_model,
     before_model,
+    dynamic_prompt,
     hook_config,
     modify_model_request,
 )
@@ -25,6 +26,7 @@
     "SummarizationMiddleware",
     "after_model",
     "before_model",
+    "dynamic_prompt",
     "hook_config",
     "modify_model_request",
 ]
```
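With the symbol re-exported from the package `__init__`, the decorator is importable one level up from `types`:

```py
from langchain.agents.middleware import dynamic_prompt
```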

libs/langchain_v1/langchain/agents/middleware/types.py

Lines changed: 134 additions & 0 deletions

```diff
@@ -45,6 +45,7 @@
     "ModelRequest",
     "OmitFromSchema",
     "PublicAgentState",
+    "dynamic_prompt",
     "hook_config",
 ]

@@ -180,6 +181,16 @@ def __call__(
         ...


+class _CallableReturningPromptString(Protocol[StateT_contra, ContextT]):
+    """Callable that returns a prompt string given ModelRequest, AgentState, and Runtime."""
+
+    def __call__(
+        self, request: ModelRequest, state: StateT_contra, runtime: Runtime[ContextT]
+    ) -> str | Awaitable[str]:
+        """Generate a system prompt string based on the request, state, and runtime."""
+        ...
+
+
 CallableT = TypeVar("CallableT", bound=Callable[..., Any])


@@ -639,3 +650,126 @@ def wrapped(
     if func is not None:
         return decorator(func)
     return decorator
+
+
+@overload
+def dynamic_prompt(
+    func: _CallableReturningPromptString[StateT, ContextT],
+) -> AgentMiddleware[StateT, ContextT]: ...
+
+
+@overload
+def dynamic_prompt(
+    func: None = None,
+) -> Callable[
+    [_CallableReturningPromptString[StateT, ContextT]],
+    AgentMiddleware[StateT, ContextT],
+]: ...
+
+
+def dynamic_prompt(
+    func: _CallableReturningPromptString[StateT, ContextT] | None = None,
+) -> (
+    Callable[
+        [_CallableReturningPromptString[StateT, ContextT]],
+        AgentMiddleware[StateT, ContextT],
+    ]
+    | AgentMiddleware[StateT, ContextT]
+):
+    """Decorator used to dynamically generate system prompts for the model.
+
+    This is a convenience decorator that creates middleware using `modify_model_request`
+    specifically for dynamic prompt generation. The decorated function should return
+    a string that will be set as the system prompt for the model request.
+
+    Args:
+        func: The function to be decorated. Must accept:
+            `request: ModelRequest, state: StateT, runtime: Runtime[ContextT]` -
+            Model request, state, and runtime context
+
+    Returns:
+        Either an AgentMiddleware instance (if func is provided) or a decorator function
+        that can be applied to a function.
+
+    The decorated function should return:
+        - `str` - The system prompt to use for the model request
+
+    Examples:
+        Basic usage with dynamic content:
+        ```python
+        @dynamic_prompt
+        def my_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+            user_name = runtime.context.get("user_name", "User")
+            return f"You are a helpful assistant helping {user_name}."
+        ```
+
+        Using state to customize the prompt:
+        ```python
+        @dynamic_prompt
+        def context_aware_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+            msg_count = len(state["messages"])
+            if msg_count > 10:
+                return "You are in a long conversation. Be concise."
+            return "You are a helpful assistant."
+        ```
+
+        Using with agent:
+        ```python
+        agent = create_agent(model, middleware=[my_prompt])
+        ```
+    """
+
+    def decorator(
+        func: _CallableReturningPromptString[StateT, ContextT],
+    ) -> AgentMiddleware[StateT, ContextT]:
+        is_async = iscoroutinefunction(func)
+
+        if is_async:
+
+            async def async_wrapped(
+                self: AgentMiddleware[StateT, ContextT],  # noqa: ARG001
+                request: ModelRequest,
+                state: StateT,
+                runtime: Runtime[ContextT],
+            ) -> ModelRequest:
+                prompt = await func(request, state, runtime)  # type: ignore[misc]
+                request.system_prompt = prompt
+                return request
+
+            middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
+
+            return type(
+                middleware_name,
+                (AgentMiddleware,),
+                {
+                    "state_schema": AgentState,
+                    "tools": [],
+                    "amodify_model_request": async_wrapped,
+                },
+            )()
+
+        def wrapped(
+            self: AgentMiddleware[StateT, ContextT],  # noqa: ARG001
+            request: ModelRequest,
+            state: StateT,
+            runtime: Runtime[ContextT],
+        ) -> ModelRequest:
+            prompt = cast("str", func(request, state, runtime))
+            request.system_prompt = prompt
+            return request
+
+        middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
+
+        return type(
+            middleware_name,
+            (AgentMiddleware,),
+            {
+                "state_schema": AgentState,
+                "tools": [],
+                "modify_model_request": wrapped,
+            },
+        )()
+
+    if func is not None:
+        return decorator(func)
+    return decorator
```
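Both branches build a one-off `AgentMiddleware` subclass at runtime with three-argument `type()`, naming the class after the decorated function and attaching that function as the sync or async hook. A stripped-down, framework-free sketch of the pattern, with hypothetical `Base` and `make_middleware` names:

```py
class Base:
    """Stand-in for AgentMiddleware in this sketch."""


def make_middleware(func):
    # type(name, bases, namespace) creates a new class; placing func in the
    # namespace makes it a bound method, and naming the class after the
    # function keeps reprs and graph nodes readable.
    name = getattr(func, "__name__", "DynamicPromptMiddleware")
    return type(name, (Base,), {"hook": func})()


def my_prompt(self, request: str) -> str:
    return f"handled: {request}"


mw = make_middleware(my_prompt)
assert type(mw).__name__ == "my_prompt"
assert mw.hook("ping") == "handled: ping"
```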

libs/langchain_v1/tests/unit_tests/agents/test_middleware_decorators.py

Lines changed: 143 additions & 0 deletions

```diff
@@ -16,6 +16,7 @@
     ModelRequest,
     before_model,
     after_model,
+    dynamic_prompt,
     modify_model_request,
     hook_config,
 )
@@ -572,3 +573,145 @@ async def async_after_with_jumps(state: AgentState, runtime: Runtime) -> dict[st
     )

     assert agent_mixed.compile().get_graph().draw_mermaid() == snapshot
+
+
+def test_dynamic_prompt_decorator() -> None:
+    """Test dynamic_prompt decorator with basic usage."""
+
+    @dynamic_prompt
+    def my_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        return "Dynamic test prompt"
+
+    assert isinstance(my_prompt, AgentMiddleware)
+    assert my_prompt.state_schema == AgentState
+    assert my_prompt.tools == []
+    assert my_prompt.__class__.__name__ == "my_prompt"
+
+    # Verify it modifies the request correctly
+    original_request = ModelRequest(
+        model="test-model",
+        system_prompt="Original",
+        messages=[HumanMessage("Hello")],
+        tool_choice=None,
+        tools=[],
+        response_format=None,
+    )
+    result = my_prompt.modify_model_request(
+        original_request, {"messages": [HumanMessage("Hello")]}, None
+    )
+    assert result.system_prompt == "Dynamic test prompt"
+
+
+def test_dynamic_prompt_uses_state() -> None:
+    """Test that dynamic_prompt can use state information."""
+
+    @dynamic_prompt
+    def custom_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        msg_count = len(state["messages"])
+        return f"Prompt with {msg_count} messages"
+
+    # Verify it uses state correctly
+    original_request = ModelRequest(
+        model="test-model",
+        system_prompt="Original",
+        messages=[HumanMessage("Hello")],
+        tool_choice=None,
+        tools=[],
+        response_format=None,
+    )
+    result = custom_prompt.modify_model_request(
+        original_request, {"messages": [HumanMessage("Hello"), HumanMessage("World")]}, None
+    )
+    assert result.system_prompt == "Prompt with 2 messages"
+
+
+def test_dynamic_prompt_integration() -> None:
+    """Test dynamic_prompt decorator in a full agent."""
+
+    prompt_calls = 0
+
+    @dynamic_prompt
+    def context_aware_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        nonlocal prompt_calls
+        prompt_calls += 1
+        return "you are a helpful assistant."
+
+    agent = create_agent(model=FakeToolCallingModel(), middleware=[context_aware_prompt])
+    agent = agent.compile()
+
+    result = agent.invoke({"messages": [HumanMessage("Hello")]})
+
+    assert prompt_calls == 1
+    assert result["messages"][-1].content == "you are a helpful assistant.-Hello"
+
+
+async def test_async_dynamic_prompt_decorator() -> None:
+    """Test dynamic_prompt decorator with async function."""
+
+    @dynamic_prompt
+    async def async_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        return "Async dynamic prompt"
+
+    assert isinstance(async_prompt, AgentMiddleware)
+    assert async_prompt.state_schema == AgentState
+    assert async_prompt.tools == []
+    assert async_prompt.__class__.__name__ == "async_prompt"
+
+
+async def test_async_dynamic_prompt_integration() -> None:
+    """Test async dynamic_prompt decorator in a full agent."""
+
+    prompt_calls = 0
+
+    @dynamic_prompt
+    async def async_context_prompt(
+        request: ModelRequest, state: AgentState, runtime: Runtime
+    ) -> str:
+        nonlocal prompt_calls
+        prompt_calls += 1
+        return "Async assistant."
+
+    agent = create_agent(model=FakeToolCallingModel(), middleware=[async_context_prompt])
+    agent = agent.compile()
+
+    result = await agent.ainvoke({"messages": [HumanMessage("Hello")]})
+    assert prompt_calls == 1
+    assert result["messages"][-1].content == "Async assistant.-Hello"
+
+
+def test_dynamic_prompt_overwrites_system_prompt() -> None:
+    """Test that dynamic_prompt overwrites the original system_prompt."""
+
+    @dynamic_prompt
+    def override_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        return "Overridden prompt."
+
+    agent = create_agent(
+        model=FakeToolCallingModel(),
+        system_prompt="Original static prompt",
+        middleware=[override_prompt],
+    )
+    agent = agent.compile()
+
+    result = agent.invoke({"messages": [HumanMessage("Hello")]})
+    assert result["messages"][-1].content == "Overridden prompt.-Hello"
+
+
+def test_dynamic_prompt_multiple_in_sequence() -> None:
+    """Test multiple dynamic_prompt decorators in sequence (last wins)."""
+
+    @dynamic_prompt
+    def first_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        return "First prompt."
+
+    @dynamic_prompt
+    def second_prompt(request: ModelRequest, state: AgentState, runtime: Runtime) -> str:
+        return "Second prompt."
+
+    # When used together, the last middleware in the list should win
+    # since they're both modify_model_request hooks executed in sequence
+    agent = create_agent(model=FakeToolCallingModel(), middleware=[first_prompt, second_prompt])
+    agent = agent.compile()
+
+    result = agent.invoke({"messages": [HumanMessage("Hello")]})
+    assert result["messages"][-1].content == "Second prompt.-Hello"
```