Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
interactions:
- request:
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '180'
content-type:
- application/json
host:
- api.openai.com
method: POST
parsed_body:
messages:
- content: Where do you want to go today?
role: assistant
- content: Answer in 5 words only. Who is Tux?
role: user
model: gpt-4.1-mini
stream: false
uri: https://api.openai.com/v1/chat/completions
response:
headers:
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
connection:
- keep-alive
content-length:
- '841'
content-type:
- application/json
openai-organization:
- deeplytalented
openai-processing-ms:
- '423'
openai-project:
- proj_1aziXuKoVAC897wPxnvH0q7Z
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
transfer-encoding:
- chunked
parsed_body:
choices:
- finish_reason: stop
index: 0
logprobs: null
message:
annotations: []
content: Linux mascot, a penguin character.
refusal: null
role: assistant
created: 1763805700
id: chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB
model: gpt-4.1-mini-2025-04-14
object: chat.completion
service_tier: default
system_fingerprint: fp_9766e549b2
usage:
completion_tokens: 8
completion_tokens_details:
accepted_prediction_tokens: 0
audio_tokens: 0
reasoning_tokens: 0
rejected_prediction_tokens: 0
prompt_tokens: 31
prompt_tokens_details:
audio_tokens: 0
cached_tokens: 0
total_tokens: 39
status:
code: 200
message: OK
version: 1
51 changes: 51 additions & 0 deletions tests/models/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1215,6 +1215,57 @@ async def get_capital(country: str) -> str:
assert result.output == snapshot('The capital of England is London.')


async def test_message_history_can_start_with_model_response(allow_model_requests: None, openai_api_key: str):
    """Test that an agent run with message_history starting with ModelResponse is executed correctly."""
    # Regression test: a conversation handed to the provider may open with an
    # assistant turn (a ModelResponse) instead of a user turn. The recorded
    # cassette for this test shows the request payload sends that prior
    # assistant message first (role: assistant), followed by the new user prompt.

    openai_model = OpenAIChatModel('gpt-4.1-mini', provider=OpenAIProvider(api_key=openai_api_key))

    # History consists solely of a prior assistant message — no preceding user prompt.
    message_history = [ModelResponse(parts=[TextPart('Where do you want to go today?')])]

    agent = Agent(model=openai_model)

    result = await agent.run('Answer in 5 words only. Who is Tux?', message_history=message_history)

    # All expected values below (output text, token counts, model name,
    # provider response id) mirror the recorded VCR cassette for this test;
    # volatile fields are matched with IsDatetime()/IsStr() instead of literals.
    assert result.output == snapshot('Linux mascot, a penguin character.')
    assert result.all_messages() == snapshot(
        [
            # The seeded assistant turn is preserved as the first message.
            ModelResponse(
                parts=[TextPart(content='Where do you want to go today?')],
                timestamp=IsDatetime(),
            ),
            # The user prompt from this run follows it.
            ModelRequest(
                parts=[
                    UserPromptPart(
                        content='Answer in 5 words only. Who is Tux?',
                        timestamp=IsDatetime(),
                    )
                ],
                run_id=IsStr(),
            ),
            # The model's reply, with usage details taken from the cassette
            # (prompt_tokens=31, completion_tokens=8).
            ModelResponse(
                parts=[TextPart(content='Linux mascot, a penguin character.')],
                usage=RequestUsage(
                    input_tokens=31,
                    output_tokens=8,
                    details={
                        'accepted_prediction_tokens': 0,
                        'audio_tokens': 0,
                        'reasoning_tokens': 0,
                        'rejected_prediction_tokens': 0,
                    },
                ),
                model_name='gpt-4.1-mini-2025-04-14',
                timestamp=IsDatetime(),
                provider_name='openai',
                provider_details={'finish_reason': 'stop'},
                provider_response_id='chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB',
                finish_reason='stop',
                run_id=IsStr(),
            ),
        ]
    )


async def test_extra_headers(allow_model_requests: None, openai_api_key: str):
# This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
m = OpenAIChatModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
Expand Down
Loading