
Commit 41336ac

Add unique run_id to run, run result, and message (request, response) classes (pydantic#3366)
1 parent fd8387c commit 41336ac


47 files changed, +1931 -560 lines
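
In short, every `ModelRequest` and `ModelResponse` produced during an agent run (along with the run and run-result objects) now carries the same `run_id`, so messages from different runs can be told apart after the fact. Below is a minimal sketch of how this could be used, assuming only the documented `run_id` field and the existing `Agent.run_sync()` / `result.all_messages()` API; the `'test'` model and the prompts are illustrative:

```python
from pydantic_ai import Agent

# Illustrative only: the 'test' model lets the example run without an API key.
agent = Agent('test')

result1 = agent.run_sync('Tell me a joke.')
result2 = agent.run_sync('Explain?', message_history=result1.all_messages())

# Group messages by the run that produced them, using the new run_id field.
by_run: dict[str, list] = {}
for message in result2.all_messages():
    by_run.setdefault(message.run_id, []).append(message)

# Messages kept from the first run retain their original run_id, so two
# distinct run_ids are expected here.
print(len(by_run))
```

In the diffs below, the plain documentation examples show the field as `run_id='...'`, and the test examples assert it with `run_id=IsStr()`.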

docs/agents.md

Lines changed: 12 additions & 4 deletions
@@ -320,7 +320,8 @@ async def main():
 content='What is the capital of France?',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 )
 ),
 CallToolsNode(
@@ -329,6 +330,7 @@ async def main():
 usage=RequestUsage(input_tokens=56, output_tokens=7),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 )
 ),
 End(data=FinalResult(output='The capital of France is Paris.')),
@@ -382,7 +384,8 @@ async def main():
 content='What is the capital of France?',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 )
 ),
 CallToolsNode(
@@ -391,6 +394,7 @@ async def main():
 usage=RequestUsage(input_tokens=56, output_tokens=7),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 )
 ),
 End(data=FinalResult(output='The capital of France is Paris.')),
@@ -1044,7 +1048,8 @@ with capture_run_messages() as messages: # (2)!
 content='Please get me the volume of a box with size 6.',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -1057,6 +1062,7 @@ with capture_run_messages() as messages: # (2)!
 usage=RequestUsage(input_tokens=62, output_tokens=4),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -1066,7 +1072,8 @@ with capture_run_messages() as messages: # (2)!
 tool_call_id='pyd_ai_tool_call_id',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -1079,6 +1086,7 @@ with capture_run_messages() as messages: # (2)!
 usage=RequestUsage(input_tokens=72, output_tokens=8),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """

docs/api/models/function.md

Lines changed: 2 additions & 1 deletion
@@ -29,7 +29,8 @@ async def model_function(
 content='Testing my agent...',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 )
 ]
 """

docs/deferred-tools.md

Lines changed: 14 additions & 5 deletions
@@ -106,7 +106,8 @@ print(result.all_messages())
 content='Delete `__init__.py`, write `Hello, world!` to `README.md`, and clear `.env`',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -129,6 +130,7 @@ print(result.all_messages())
 usage=RequestUsage(input_tokens=63, output_tokens=21),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -138,7 +140,8 @@ print(result.all_messages())
 tool_call_id='update_file_readme',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -154,7 +157,8 @@ print(result.all_messages())
 tool_call_id='delete_file',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -165,6 +169,7 @@ print(result.all_messages())
 usage=RequestUsage(input_tokens=79, output_tokens=39),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """
@@ -275,7 +280,8 @@ async def main():
 content='Calculate the answer to the ultimate question of life, the universe, and everything',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -290,6 +296,7 @@ async def main():
 usage=RequestUsage(input_tokens=63, output_tokens=13),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -299,7 +306,8 @@ async def main():
 tool_call_id='pyd_ai_tool_call_id',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -310,6 +318,7 @@ async def main():
 usage=RequestUsage(input_tokens=64, output_tokens=28),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """

docs/durable_execution/temporal.md

Lines changed: 1 addition & 1 deletion
@@ -172,7 +172,7 @@ As workflows and activities run in separate processes, any values passed between
 
 To account for these limitations, tool functions and the [event stream handler](#streaming) running inside activities receive a limited version of the agent's [`RunContext`][pydantic_ai.tools.RunContext], and it's your responsibility to make sure that the [dependencies](../dependencies.md) object provided to [`TemporalAgent.run()`][pydantic_ai.durable_exec.temporal.TemporalAgent.run] can be serialized using Pydantic.
 
-Specifically, only the `deps`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` fields are available by default, and trying to access `model`, `usage`, `prompt`, `messages`, or `tracer` will raise an error.
+Specifically, only the `deps`, `run_id`, `retries`, `tool_call_id`, `tool_name`, `tool_call_approved`, `retry`, `max_retries`, `run_step` and `partial_output` fields are available by default, and trying to access `model`, `usage`, `prompt`, `messages`, or `tracer` will raise an error.
 If you need one or more of these attributes to be available inside activities, you can create a [`TemporalRunContext`][pydantic_ai.durable_exec.temporal.TemporalRunContext] subclass with custom `serialize_run_context` and `deserialize_run_context` class methods and pass it to [`TemporalAgent`][pydantic_ai.durable_exec.temporal.TemporalAgent] as `run_context_type`.
 
 ### Streaming
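
To make the last point in that paragraph concrete, here is a minimal sketch of such a subclass. The names `TemporalRunContext`, `serialize_run_context`, `deserialize_run_context`, and `run_context_type` come from the documentation text above; the method signature and the wiring shown are assumptions, not taken from the library's source, so check them against the API reference before relying on them.

```python
from typing import Any

from pydantic_ai import Agent
from pydantic_ai.durable_exec.temporal import TemporalAgent, TemporalRunContext
from pydantic_ai.tools import RunContext

agent = Agent('gpt-5', name='my_agent')  # hypothetical agent, for illustration only


class RunContextWithPrompt(TemporalRunContext):
    # Sketch only: also forward `prompt` into activities, on top of the default
    # fields (`deps`, `run_id`, `retries`, `tool_call_id`, ...). The signature
    # below is assumed; `deserialize_run_context` can be overridden
    # symmetrically if the extra field needs special handling on the way back.

    @classmethod
    def serialize_run_context(cls, ctx: RunContext) -> dict[str, Any]:
        return {**super().serialize_run_context(ctx), 'prompt': ctx.prompt}


# Hypothetical wiring: pass the subclass to TemporalAgent as run_context_type.
temporal_agent = TemporalAgent(agent, run_context_type=RunContextWithPrompt)
```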

docs/message-history.md

Lines changed: 20 additions & 7 deletions
@@ -50,7 +50,8 @@ print(result.all_messages())
 content='Tell me a joke.',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -61,6 +62,7 @@ print(result.all_messages())
 usage=RequestUsage(input_tokens=60, output_tokens=12),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """
@@ -92,7 +94,8 @@ async def main():
 content='Tell me a joke.',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 )
 ]
 """
@@ -118,7 +121,8 @@ async def main():
 content='Tell me a joke.',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -129,6 +133,7 @@ async def main():
 usage=RequestUsage(input_tokens=50, output_tokens=12),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """
@@ -172,7 +177,8 @@ print(result2.all_messages())
 content='Tell me a joke.',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -183,14 +189,16 @@ print(result2.all_messages())
 usage=RequestUsage(input_tokens=60, output_tokens=12),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
 UserPromptPart(
 content='Explain?',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -201,6 +209,7 @@ print(result2.all_messages())
 usage=RequestUsage(input_tokens=61, output_tokens=26),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """
@@ -293,7 +302,8 @@ print(result2.all_messages())
 content='Tell me a joke.',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -304,14 +314,16 @@ print(result2.all_messages())
 usage=RequestUsage(input_tokens=60, output_tokens=12),
 model_name='gpt-5',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
 UserPromptPart(
 content='Explain?',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -322,6 +334,7 @@ print(result2.all_messages())
 usage=RequestUsage(input_tokens=61, output_tokens=26),
 model_name='gemini-2.5-pro',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """

docs/testing.md

Lines changed: 5 additions & 1 deletion
@@ -127,7 +127,8 @@ async def test_forecast():
 content='What will the weather be like in London on 2024-11-28?',
 timestamp=IsNow(tz=timezone.utc), # (7)!
 ),
-]
+],
+run_id=IsStr(),
 ),
 ModelResponse(
 parts=[
@@ -146,6 +147,7 @@ async def test_forecast():
 ),
 model_name='test',
 timestamp=IsNow(tz=timezone.utc),
+run_id=IsStr(),
 ),
 ModelRequest(
 parts=[
@@ -156,6 +158,7 @@ async def test_forecast():
 timestamp=IsNow(tz=timezone.utc),
 ),
 ],
+run_id=IsStr(),
 ),
 ModelResponse(
 parts=[
@@ -169,6 +172,7 @@ async def test_forecast():
 ),
 model_name='test',
 timestamp=IsNow(tz=timezone.utc),
+run_id=IsStr(),
 ),
 ]
 ```

docs/tools.md

Lines changed: 9 additions & 3 deletions
@@ -87,7 +87,8 @@ print(dice_result.all_messages())
 content='My guess is 4',
 timestamp=datetime.datetime(...),
 ),
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -98,6 +99,7 @@ print(dice_result.all_messages())
 usage=RequestUsage(input_tokens=90, output_tokens=2),
 model_name='gemini-2.5-flash',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -107,7 +109,8 @@ print(dice_result.all_messages())
 tool_call_id='pyd_ai_tool_call_id',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -118,6 +121,7 @@ print(dice_result.all_messages())
 usage=RequestUsage(input_tokens=91, output_tokens=4),
 model_name='gemini-2.5-flash',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ModelRequest(
 parts=[
@@ -127,7 +131,8 @@ print(dice_result.all_messages())
 tool_call_id='pyd_ai_tool_call_id',
 timestamp=datetime.datetime(...),
 )
-]
+],
+run_id='...',
 ),
 ModelResponse(
 parts=[
@@ -138,6 +143,7 @@ print(dice_result.all_messages())
 usage=RequestUsage(input_tokens=92, output_tokens=12),
 model_name='gemini-2.5-flash',
 timestamp=datetime.datetime(...),
+run_id='...',
 ),
 ]
 """
