@@ -1,8 +1,25 @@
 import sys
+from datetime import datetime, timezone

 import pytest
+from inline_snapshot import snapshot
+
+from pydantic_ai.messages import (
+    AudioUrl,
+    BinaryContent,
+    DocumentUrl,
+    ImageUrl,
+    ModelMessagesTypeAdapter,
+    ModelRequest,
+    ModelResponse,
+    RequestUsage,
+    TextPart,
+    ThinkingPartDelta,
+    UserPromptPart,
+    VideoUrl,
+)

-from pydantic_ai.messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, ThinkingPartDelta, VideoUrl
+from .conftest import IsNow


 def test_image_url():
@@ -325,3 +342,63 @@ def test_thinking_part_delta_apply_to_thinking_part_delta(): |
     result = content_delta.apply(original_delta)
     assert isinstance(result, ThinkingPartDelta)
     assert result.content_delta == 'new_content'
+
+
+def test_pre_usage_refactor_messages_deserializable():
+    # https://github.com/pydantic/pydantic-ai/pull/2378 changed the `ModelResponse` fields,
+    # but as we tell people to store those in the DB, we want to be very careful not to break deserialization.
+    data = [
+        {
+            'parts': [
+                {
+                    'content': 'What is the capital of Mexico?',
+                    'timestamp': datetime.now(tz=timezone.utc),
+                    'part_kind': 'user-prompt',
+                }
+            ],
+            'instructions': None,
+            'kind': 'request',
+        },
+        {
+            'parts': [{'content': 'Mexico City.', 'part_kind': 'text'}],
+            'usage': {
+                'requests': 1,
+                'request_tokens': 13,
+                'response_tokens': 76,
+                'total_tokens': 89,
+                'details': None,
+            },
+            'model_name': 'gpt-5-2025-08-07',
+            'timestamp': datetime.now(tz=timezone.utc),
+            'kind': 'response',
+            'vendor_details': {
+                'finish_reason': 'STOP',
+            },
+            'vendor_id': 'chatcmpl-CBpEXeCfDAW4HRcKQwbqsRDn7u7C5',
+        },
+    ]
+    messages = ModelMessagesTypeAdapter.validate_python(data)
+    assert messages == snapshot(
+        [
+            ModelRequest(
+                parts=[
+                    UserPromptPart(
+                        content='What is the capital of Mexico?',
+                        timestamp=IsNow(tz=timezone.utc),
+                    )
+                ]
+            ),
+            ModelResponse(
+                parts=[TextPart(content='Mexico City.')],
+                usage=RequestUsage(
+                    input_tokens=13,
+                    output_tokens=76,
+                    details={},
+                ),
+                model_name='gpt-5-2025-08-07',
+                timestamp=IsNow(tz=timezone.utc),
+                provider_details={'finish_reason': 'STOP'},
+                provider_response_id='chatcmpl-CBpEXeCfDAW4HRcKQwbqsRDn7u7C5',
+            ),
+        ]
+    )
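
For readers wiring this up in an application: the backward-compatibility path the test exercises is the same `ModelMessagesTypeAdapter` round trip used when persisting message history. A minimal sketch, assuming you store the serialized bytes yourself (the `history` and `stored` names are illustrative, not part of the test suite):

```python
from pydantic_ai.messages import ModelMessagesTypeAdapter, ModelRequest, UserPromptPart

# Build a small history, serialize it as you would before writing it to a DB column,
# then validate it back into message objects.
history = [ModelRequest(parts=[UserPromptPart(content='What is the capital of Mexico?')])]

stored = ModelMessagesTypeAdapter.dump_json(history)       # bytes, safe to persist
restored = ModelMessagesTypeAdapter.validate_json(stored)  # back to a list of messages

assert restored[0].parts[0].content == 'What is the capital of Mexico?'
```

The test above goes one step further: it feeds `validate_python` a payload shaped like the pre-refactor schema (`request_tokens`/`response_tokens`, `vendor_details`, `vendor_id`) and checks that it still validates onto the current `RequestUsage` and `ModelResponse` fields.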