Skip to content

Commit a7dd57a

Browse files
committed
Merge branch 'main' of https://github.com/pydantic/pydantic-ai into adding_run_id_to_modelmessage
2 parents 2424017 + 86b645f commit a7dd57a

File tree

4 files changed

+83
-2
lines changed

4 files changed

+83
-2
lines changed

docs/models/overview.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,12 @@ You can use [`FallbackModel`][pydantic_ai.models.fallback.FallbackModel] to atte
8686
in sequence until one successfully returns a result. Under the hood, Pydantic AI automatically switches
8787
from one model to the next if the current model returns a 4xx or 5xx status code.
8888

89+
!!! note
90+
91+
The provider SDKs on which Models are based (like OpenAI, Anthropic, etc.) often have built-in retry logic that can delay the `FallbackModel` from activating.
92+
93+
When using `FallbackModel`, it's recommended to disable provider SDK retries to ensure immediate fallback, for example by setting `max_retries=0` on a [custom OpenAI client](openai.md#custom-openai-client).
94+
8995
In the following example, the agent first makes a request to the OpenAI model (which fails due to an invalid API key),
9096
and then falls back to the Anthropic model.
9197

pydantic_ai_slim/pydantic_ai/models/google.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -677,7 +677,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
677677
provider_name=self.provider_name,
678678
)
679679

680-
if part.text is not None:
680+
if part.text:
681681
if part.thought:
682682
yield self._parts_manager.handle_thinking_delta(vendor_part_id='thinking', content=part.text)
683683
else:
@@ -822,7 +822,7 @@ def _process_response_from_parts(
822822
elif part.code_execution_result is not None:
823823
assert code_execution_tool_call_id is not None
824824
item = _map_code_execution_result(part.code_execution_result, provider_name, code_execution_tool_call_id)
825-
elif part.text is not None:
825+
elif part.text:
826826
if part.thought:
827827
item = ThinkingPart(content=part.text)
828828
else:

pydantic_ai_slim/pydantic_ai/run.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,36 @@ def result(self) -> AgentRunResult[OutputDataT] | None:
135135
self._traceparent(required=False),
136136
)
137137

138+
def all_messages(self) -> list[_messages.ModelMessage]:
139+
"""Return all messages for the run so far.
140+
141+
Messages from older runs are included.
142+
"""
143+
return self.ctx.state.message_history
144+
145+
def all_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:
146+
"""Return all messages from [`all_messages`][pydantic_ai.agent.AgentRun.all_messages] as JSON bytes.
147+
148+
Returns:
149+
JSON bytes representing the messages. (NOTE(review): the `output_tool_return_content` parameter is accepted but never used in this method body — confirm whether it should be forwarded to `all_messages()` or removed from the signature.)
150+
"""
151+
return _messages.ModelMessagesTypeAdapter.dump_json(self.all_messages())
152+
153+
def new_messages(self) -> list[_messages.ModelMessage]:
154+
"""Return new messages for the run so far.
155+
156+
Messages from older runs are excluded.
157+
"""
158+
return self.all_messages()[self.ctx.deps.new_message_index :]
159+
160+
def new_messages_json(self) -> bytes:
161+
"""Return new messages from [`new_messages`][pydantic_ai.agent.AgentRun.new_messages] as JSON bytes.
162+
163+
Returns:
164+
JSON bytes representing the new messages.
165+
"""
166+
return _messages.ModelMessagesTypeAdapter.dump_json(self.new_messages())
167+
138168
def __aiter__(
139169
self,
140170
) -> AsyncIterator[_agent_graph.AgentNode[AgentDepsT, OutputDataT] | End[FinalResult[OutputDataT]]]:

tests/test_agent.py

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6078,3 +6078,48 @@ def delete_file() -> None:
60786078
assert result.output == snapshot(
60796079
DeferredToolRequests(approvals=[ToolCallPart(tool_name='delete_file', tool_call_id=IsStr())])
60806080
)
6081+
6082+
6083+
async def test_message_history():
6084+
def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
6085+
return ModelResponse(parts=[TextPart('ok here is text')])
6086+
6087+
agent = Agent(FunctionModel(llm))
6088+
6089+
async with agent.iter(
6090+
message_history=[
6091+
ModelRequest(parts=[UserPromptPart(content='Hello')]),
6092+
],
6093+
) as run:
6094+
async for _ in run:
6095+
pass
6096+
assert run.new_messages() == snapshot(
6097+
[
6098+
ModelResponse(
6099+
parts=[TextPart(content='ok here is text')],
6100+
usage=RequestUsage(input_tokens=51, output_tokens=4),
6101+
model_name='function:llm:',
6102+
timestamp=IsDatetime(),
6103+
),
6104+
]
6105+
)
6106+
assert run.new_messages_json().startswith(b'[{"parts":[{"content":"ok here is text",')
6107+
assert run.all_messages() == snapshot(
6108+
[
6109+
ModelRequest(
6110+
parts=[
6111+
UserPromptPart(
6112+
content='Hello',
6113+
timestamp=IsDatetime(),
6114+
)
6115+
]
6116+
),
6117+
ModelResponse(
6118+
parts=[TextPart(content='ok here is text')],
6119+
usage=RequestUsage(input_tokens=51, output_tokens=4),
6120+
model_name='function:llm:',
6121+
timestamp=IsDatetime(),
6122+
),
6123+
]
6124+
)
6125+
assert run.all_messages_json().startswith(b'[{"parts":[{"content":"Hello",')

0 commit comments

Comments
 (0)