File tree — 3 files changed: +32 −13 lines
pydantic_ai_slim/pydantic_ai
tests/models/cassettes/test_openai_responses — 3 files changed: +32 −13 lines
@@ -980,10 +980,15 @@ async def _responses_create(
980980 text = text or {}
981981 text ['verbosity' ] = verbosity
982982
983- unsupported_model_settings = OpenAIModelProfile .from_profile (self .profile ).openai_unsupported_model_settings
983+ profile = OpenAIModelProfile .from_profile (self .profile )
984+ unsupported_model_settings = profile .openai_unsupported_model_settings
984985 for setting in unsupported_model_settings :
985986 model_settings .pop (setting , None )
986987
988+ include : list [responses .ResponseIncludable ] | None = None
989+ if profile .openai_supports_encrypted_reasoning_content :
990+ include = ['reasoning.encrypted_content' ]
991+
987992 try :
988993 extra_headers = model_settings .get ('extra_headers' , {})
989994 extra_headers .setdefault ('User-Agent' , get_user_agent ())
@@ -1004,7 +1009,7 @@ async def _responses_create(
10041009 reasoning = reasoning ,
10051010 user = model_settings .get ('openai_user' , NOT_GIVEN ),
10061011 text = text or NOT_GIVEN ,
1007- include = [ 'reasoning.encrypted_content' ] ,
1012+ include = include or NOT_GIVEN ,
10081013 extra_headers = extra_headers ,
10091014 extra_body = model_settings .get ('extra_body' ),
10101015 )
Original file line number Diff line number Diff line change @@ -41,6 +41,9 @@ class OpenAIModelProfile(ModelProfile):
4141 openai_chat_supports_web_search : bool = False
4242 """Whether the model supports web search in Chat Completions API."""
4343
44+ openai_supports_encrypted_reasoning_content : bool = False
45+ """Whether the model supports including encrypted reasoning content in the response."""
46+
4447 def __post_init__ (self ): # pragma: no cover
4548 if not self .openai_supports_sampling_settings :
4649 warnings .warn (
@@ -84,6 +87,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
8487 openai_unsupported_model_settings = openai_unsupported_model_settings ,
8588 openai_system_prompt_role = openai_system_prompt_role ,
8689 openai_chat_supports_web_search = supports_web_search ,
90+ openai_supports_encrypted_reasoning_content = is_reasoning_model ,
8791 )
8892
8993
Original file line number Diff line number Diff line change @@ -8,7 +8,7 @@ interactions:
88 connection :
99 - keep-alive
1010 content-length :
11- - ' 105 '
11+ - ' 102 '
1212 content-type :
1313 - application/json
1414 host :
@@ -18,8 +18,8 @@ interactions:
1818 input :
1919 - content : What is the capital of France?
2020 role : user
21- instructions : ' '
2221 model : gpt-4o
22+ stream : false
2323 uri : https://api.openai.com/v1/responses
2424 response :
2525 headers :
@@ -28,61 +28,71 @@ interactions:
2828 connection :
2929 - keep-alive
3030 content-length :
31- - ' 1183 '
31+ - ' 1385 '
3232 content-type :
3333 - application/json
3434 openai-organization :
3535 - pydantic-28gund
3636 openai-processing-ms :
37- - ' 323'
37+ - ' 1304'
38+ openai-project :
39+ - proj_dKobscVY9YJxeEaDJen54e3d
3840 openai-version :
3941 - ' 2020-10-01'
4042 strict-transport-security :
4143 - max-age=31536000; includeSubDomains; preload
4244 transfer-encoding :
4345 - chunked
4446 parsed_body :
45- created_at : 1743075630
47+ background : false
48+ created_at : 1757604033
4649 error : null
47- id : resp_67e5392e0fa48191b4c2e1ec82b33e7a006bc99764149f64
50+ id : resp_68c2e8c147ac819491bcd667055eadbc02e845978fbbb592
4851 incomplete_details : null
49- instructions : ' '
52+ instructions : null
5053 max_output_tokens : null
54+ max_tool_calls : null
5155 metadata : {}
5256 model : gpt-4o-2024-08-06
5357 object : response
5458 output :
5559 - content :
5660 - annotations : []
61+ logprobs : []
5762 text : The capital of France is Paris.
5863 type : output_text
59- id : msg_67e5392e466c8191ad4ef0744bded009006bc99764149f64
64+ id : msg_68c2e8c26114819489dae5b59fceb7bf02e845978fbbb592
6065 role : assistant
6166 status : completed
6267 type : message
6368 parallel_tool_calls : true
6469 previous_response_id : null
70+ prompt_cache_key : null
6571 reasoning :
6672 effort : null
67- generate_summary : null
73+ summary : null
74+ safety_identifier : null
75+ service_tier : default
6876 status : completed
6977 store : true
7078 temperature : 1.0
7179 text :
7280 format :
7381 type : text
82+ verbosity : medium
7483 tool_choice : auto
7584 tools : []
85+ top_logprobs : 0
7686 top_p : 1.0
7787 truncation : disabled
7888 usage :
79- input_tokens : 32
89+ input_tokens : 14
8090 input_tokens_details :
8191 cached_tokens : 0
8292 output_tokens : 8
8393 output_tokens_details :
8494 reasoning_tokens : 0
85- total_tokens : 40
95+ total_tokens : 22
8696 user : null
8797 status :
8898 code : 200
You can’t perform that action at this time.
0 commit comments