
Commit b5c0910

Merge remote-tracking branch 'origin/main' into fix-split-thinking-tags-v2
2 parents 0f876de + a253fad commit b5c0910

File tree: 3 files changed, 55 additions & 12 deletions

pydantic_ai_slim/pydantic_ai/_agent_graph.py

Lines changed: 10 additions & 2 deletions

@@ -588,7 +588,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
                 # as the empty response and request will not create any items in the API payload,
                 # in the hope the model will return a non-empty response this time.
                 ctx.state.increment_retries(ctx.deps.max_result_retries, model_settings=ctx.deps.model_settings)
-                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                run_context = build_run_context(ctx)
+                instructions = await ctx.deps.get_instructions(run_context)
+                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                    _messages.ModelRequest(parts=[], instructions=instructions)
+                )
                 return

             text = ''
@@ -652,7 +656,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
                 ctx.state.increment_retries(
                     ctx.deps.max_result_retries, error=e, model_settings=ctx.deps.model_settings
                 )
-                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+                run_context = build_run_context(ctx)
+                instructions = await ctx.deps.get_instructions(run_context)
+                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                    _messages.ModelRequest(parts=[e.tool_retry], instructions=instructions)
+                )

         self._events_iterator = _run_stream()
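Both hunks apply the same fix: before constructing the retry ModelRequest, the node rebuilds the run context and re-resolves the current agent's instructions, so retry requests no longer go out with instructions set to None. Below is a minimal standalone sketch of that pattern, using a hypothetical build_retry_request helper rather than pydantic-ai's actual internals:

    from __future__ import annotations

    from dataclasses import dataclass, field
    from typing import Callable


    @dataclass
    class ModelRequest:
        """Simplified stand-in for pydantic_ai.messages.ModelRequest."""

        parts: list[object] = field(default_factory=list)
        instructions: str | None = None


    def build_retry_request(
        parts: list[object],
        resolve_instructions: Callable[[], str | None],
    ) -> ModelRequest:
        # Before this commit the retry request was built as ModelRequest(parts=[...])
        # with no instructions; after it, instructions are re-resolved from the
        # current run context (ctx.deps.get_instructions(build_run_context(ctx))).
        return ModelRequest(parts=parts, instructions=resolve_instructions())


    # The resolver runs per retry, so the request reflects the agent actually
    # handling the run, not whichever agent produced the message history.
    request = build_retry_request([], lambda: 'Agent 2 instructions')
    assert request.instructions == 'Agent 2 instructions'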

tests/test_a2a.py

Lines changed: 11 additions & 9 deletions

@@ -560,11 +560,11 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
     task1_id = result1['id']
     context_id = result1['context_id']

-    # Wait for first task to complete
-    await anyio.sleep(0.1)
-    task1 = await a2a_client.get_task(task1_id)
-    assert 'result' in task1
-    assert task1['result']['status']['state'] == 'completed'
+    while task1 := await a2a_client.get_task(task1_id):  # pragma: no branch
+        if 'result' in task1 and task1['result']['status']['state'] == 'completed':
+            result1 = task1['result']
+            break
+        await anyio.sleep(0.1)

     # Verify the model received at least one message
     assert len(messages_received) == 1
@@ -668,11 +668,13 @@ def return_thinking_response(_: list[ModelMessage], info: AgentInfo) -> ModelRes
     task_id = result['id']

     # Wait for completion
-    await anyio.sleep(0.1)
-    task = await a2a_client.get_task(task_id)
+    while task := await a2a_client.get_task(task_id):  # pragma: no branch
+        if 'result' in task and task['result']['status']['state'] == 'completed':
+            result = task['result']
+            break
+        await anyio.sleep(0.1)

-    assert 'result' in task
-    assert task['result'] == snapshot(
+    assert result == snapshot(
         {
             'id': IsStr(),
             'context_id': IsStr(),
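Both test changes replace a fixed await anyio.sleep(0.1) plus a single get_task call with a poll-until-completed loop, so the assertions no longer race the background task; the `# pragma: no branch` comment tells coverage not to demand the never-taken loop-exit branch. The same loop could be factored into a reusable helper; a sketch under that assumption (wait_for_completed and the get_task callable are illustrative, not part of this commit):

    from typing import Any, Awaitable, Callable

    import anyio


    async def wait_for_completed(
        get_task: Callable[[str], Awaitable[dict[str, Any]]],
        task_id: str,
        interval: float = 0.1,
    ) -> dict[str, Any]:
        """Poll get_task until the task reports state 'completed', then return its result."""
        while task := await get_task(task_id):
            if 'result' in task and task['result']['status']['state'] == 'completed':
                return task['result']
            await anyio.sleep(interval)
        raise RuntimeError(f'task {task_id!r} was not found while polling')


    # Usage in a test would then read:
    #     result1 = await wait_for_completed(a2a_client.get_task, task1_id)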

tests/test_agent.py

Lines changed: 34 additions & 1 deletion

@@ -1836,7 +1836,14 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                instructions="""\
+Always respond with a JSON object that's compatible with this schema:
+
+{"additionalProperties": false, "properties": {"city": {"type": "string"}}, "required": ["city"], "type": "object", "title": "get_weather"}
+
+Don't include any text or Markdown fencing before or after.\
+""",
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City"}')],
@@ -3852,6 +3859,32 @@ def empty_instructions() -> str:
     )


+def test_multi_agent_instructions_with_structured_output():
+    """Test that Agent2 uses its own instructions when called with Agent1's history.
+
+    Reproduces issue #3207: when running agents sequentially with no user_prompt
+    and structured output, Agent2's instructions were ignored.
+    """
+
+    class Output(BaseModel):
+        text: str
+
+    agent1 = Agent('test', instructions='Agent 1 instructions')
+    agent2 = Agent('test', instructions='Agent 2 instructions', output_type=Output)
+
+    result1 = agent1.run_sync('Hello')
+
+    # TestModel doesn't support structured output, so this will fail with retries
+    # But we can still verify that Agent2's instructions are used in retry requests
+    with capture_run_messages() as messages:
+        with pytest.raises(UnexpectedModelBehavior):
+            agent2.run_sync(message_history=result1.new_messages())
+
+    # Verify Agent2's retry requests used Agent2's instructions (not Agent1's)
+    requests = [m for m in messages if isinstance(m, ModelRequest)]
+    assert any(r.instructions == 'Agent 2 instructions' for r in requests)
+
+
 def test_empty_final_response():
     def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         if len(messages) == 1:
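The new test pins down the hand-off pattern from issue #3207: agent1 produces a history, and agent2, which has its own instructions and a structured output_type, is run on that history with no new user prompt. For context, a hedged sketch of the user-facing shape of that scenario; the model string and the Summary type are placeholders, not taken from the commit:

    from pydantic import BaseModel

    from pydantic_ai import Agent


    class Summary(BaseModel):
        text: str


    # Placeholder model identifier; any supported model string works here.
    research_agent = Agent('openai:gpt-4o', instructions='Research the topic thoroughly.')
    summary_agent = Agent('openai:gpt-4o', instructions='Summarize in one sentence.', output_type=Summary)

    research = research_agent.run_sync('Tell me about ants')
    # No new user prompt: summary_agent runs directly on research_agent's history.
    # Per the test docstring, summary_agent's instructions used to be ignored on
    # this path; the _agent_graph.py change re-resolves them whenever retry
    # requests are built.
    summary = summary_agent.run_sync(message_history=research.new_messages())
    print(summary.output.text)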
