
Commit 2a7cdb5

Merge remote-tracking branch 'upstream/main' into pr-tool-output-naming
2 parents 6315c3b + a253fad

3 files changed: 55 additions & 12 deletions

3 files changed

+55
-12
lines changed

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 10 additions & 2 deletions

@@ -588,7 +588,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
                 # as the empty response and request will not create any items in the API payload,
                 # in the hope the model will return a non-empty response this time.
                 ctx.state.increment_retries(ctx.deps.max_result_retries, model_settings=ctx.deps.model_settings)
-                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                run_context = build_run_context(ctx)
+                instructions = await ctx.deps.get_instructions(run_context)
+                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                    _messages.ModelRequest(parts=[], instructions=instructions)
+                )
                 return

             text = ''
@@ -652,7 +656,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
                 ctx.state.increment_retries(
                     ctx.deps.max_result_retries, error=e, model_settings=ctx.deps.model_settings
                 )
-                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+                run_context = build_run_context(ctx)
+                instructions = await ctx.deps.get_instructions(run_context)
+                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                    _messages.ModelRequest(parts=[e.tool_retry], instructions=instructions)
+                )

         self._events_iterator = _run_stream()
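Both retry paths above now follow the same pattern: rebuild the run context, re-resolve the agent's instructions, and attach them to the follow-up ModelRequest, so a retry is never sent without them. A minimal sketch of the two request shapes the retry paths produce, built by hand for illustration (the instruction text is made up, and the RetryPromptPart is constructed directly here instead of coming from a ToolRetryError as in the diff):

# Illustrative sketch only, not part of this commit.
from pydantic_ai.messages import ModelRequest, RetryPromptPart

# Empty-response retry: no parts, but the re-resolved instructions are kept.
empty_retry = ModelRequest(parts=[], instructions='Always answer with valid JSON.')

# Output-validation retry: the retry prompt part plus the same instructions.
validation_retry = ModelRequest(
    parts=[RetryPromptPart(content='Validation failed, please try again.')],
    instructions='Always answer with valid JSON.',
)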

tests/test_a2a.py

Lines changed: 11 additions & 9 deletions

@@ -560,11 +560,11 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
     task1_id = result1['id']
     context_id = result1['context_id']

-    # Wait for first task to complete
-    await anyio.sleep(0.1)
-    task1 = await a2a_client.get_task(task1_id)
-    assert 'result' in task1
-    assert task1['result']['status']['state'] == 'completed'
+    while task1 := await a2a_client.get_task(task1_id):  # pragma: no branch
+        if 'result' in task1 and task1['result']['status']['state'] == 'completed':
+            result1 = task1['result']
+            break
+        await anyio.sleep(0.1)

     # Verify the model received at least one message
     assert len(messages_received) == 1
@@ -668,11 +668,13 @@ def return_thinking_response(_: list[ModelMessage], info: AgentInfo) -> ModelRes
     task_id = result['id']

     # Wait for completion
-    await anyio.sleep(0.1)
-    task = await a2a_client.get_task(task_id)
+    while task := await a2a_client.get_task(task_id):  # pragma: no branch
+        if 'result' in task and task['result']['status']['state'] == 'completed':
+            result = task['result']
+            break
+        await anyio.sleep(0.1)

-    assert 'result' in task
-    assert task['result'] == snapshot(
+    assert result == snapshot(
         {
             'id': IsStr(),
             'context_id': IsStr(),
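Both test changes replace a single fixed sleep with a poll-until-completed loop, removing the assumption that 0.1s is always enough for the task to finish. A generic version of that loop could be factored into a helper like the one below; the helper name, the timeout value, and the fail_after guard are additions for illustration and are not part of this commit.

# Illustrative polling helper, not part of this commit.
import anyio

async def wait_for_completed(get_task, task_id: str, interval: float = 0.1, timeout: float = 5.0):
    """Poll get_task(task_id) until the task reports state 'completed', or time out."""
    with anyio.fail_after(timeout):  # raises TimeoutError instead of hanging forever
        while True:
            task = await get_task(task_id)
            if 'result' in task and task['result']['status']['state'] == 'completed':
                return task['result']
            await anyio.sleep(interval)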

tests/test_agent.py

Lines changed: 34 additions & 1 deletion

@@ -1872,7 +1872,14 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            instructions="""\
+Always respond with a JSON object that's compatible with this schema:
+
+{"additionalProperties": false, "properties": {"city": {"type": "string"}}, "required": ["city"], "type": "object", "title": "get_weather"}
+
+Don't include any text or Markdown fencing before or after.\
+""",
         ),
         ModelResponse(
             parts=[TextPart(content='{"city": "Mexico City"}')],
@@ -3888,6 +3895,32 @@ def empty_instructions() -> str:
     )


+def test_multi_agent_instructions_with_structured_output():
+    """Test that Agent2 uses its own instructions when called with Agent1's history.
+
+    Reproduces issue #3207: when running agents sequentially with no user_prompt
+    and structured output, Agent2's instructions were ignored.
+    """
+
+    class Output(BaseModel):
+        text: str
+
+    agent1 = Agent('test', instructions='Agent 1 instructions')
+    agent2 = Agent('test', instructions='Agent 2 instructions', output_type=Output)
+
+    result1 = agent1.run_sync('Hello')
+
+    # TestModel doesn't support structured output, so this will fail with retries
+    # But we can still verify that Agent2's instructions are used in retry requests
+    with capture_run_messages() as messages:
+        with pytest.raises(UnexpectedModelBehavior):
+            agent2.run_sync(message_history=result1.new_messages())
+
+    # Verify Agent2's retry requests used Agent2's instructions (not Agent1's)
+    requests = [m for m in messages if isinstance(m, ModelRequest)]
+    assert any(r.instructions == 'Agent 2 instructions' for r in requests)
+
+
 def test_empty_final_response():
     def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         if len(messages) == 1:
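At the application level, the scenario this new test covers is a sequential handoff between two agents, where the second agent runs with only the first agent's message history and a structured output type. A hedged usage sketch of that handoff follows; the model name, the agent roles, and the Summary type are placeholders for illustration and are not part of this commit.

# Usage-level sketch of the handoff described in issue #3207; model name and
# Summary type are placeholders.
from pydantic import BaseModel
from pydantic_ai import Agent

class Summary(BaseModel):
    text: str

researcher = Agent('openai:gpt-4o', instructions='Collect the relevant facts.')
summarizer = Agent('openai:gpt-4o', instructions='Summarize the conversation.', output_type=Summary)

research = researcher.run_sync('Tell me about the Eiffel Tower')

# No new user prompt: the summarizer runs purely from the researcher's history.
# With this fix, the requests it sends (including structured-output retries)
# carry 'Summarize the conversation.' rather than being sent without instructions.
summary = summarizer.run_sync(message_history=research.new_messages())
print(summary.output.text)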
