3 files changed (+32 / -13 lines)

File tree:
pydantic_ai_slim/pydantic_ai
tests/models/cassettes/test_openai_responses

pydantic_ai_slim/pydantic_ai (Responses model):

@@ -980,10 +980,15 @@ async def _responses_create(
         text = text or {}
         text['verbosity'] = verbosity

-        unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        unsupported_model_settings = profile.openai_unsupported_model_settings
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)

+        include: list[responses.ResponseIncludable] | None = None
+        if profile.openai_supports_encrypted_reasoning_content:
+            include = ['reasoning.encrypted_content']
+
         try:
             extra_headers = model_settings.get('extra_headers', {})
             extra_headers.setdefault('User-Agent', get_user_agent())
@@ -1004,7 +1009,7 @@ async def _responses_create(
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 text=text or NOT_GIVEN,
-                include=['reasoning.encrypted_content'],
+                include=include or NOT_GIVEN,
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
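
For context, the change above means encrypted reasoning content is only requested when the model's profile opts in; for all other models the include parameter is simply omitted from the Responses API call. Below is a minimal standalone sketch of that gating logic. OpenAIModelProfile here is a simplified stand-in and NOT_GIVEN is a mock of the SDK sentinel; this is not pydantic-ai's actual code.

from dataclasses import dataclass

# Stand-in for the SDK sentinel meaning "omit this parameter from the request".
NOT_GIVEN = object()


@dataclass
class OpenAIModelProfile:
    # Mirrors the new profile field; defaults to False so existing profiles are unaffected.
    openai_supports_encrypted_reasoning_content: bool = False


def build_include(profile: OpenAIModelProfile) -> list[str] | object:
    """Return the `include` argument to pass to a Responses API call."""
    include: list[str] | None = None
    if profile.openai_supports_encrypted_reasoning_content:
        include = ['reasoning.encrypted_content']
    # Non-reasoning models fall back to the sentinel, so `include` is not sent at all.
    return include or NOT_GIVEN


assert build_include(OpenAIModelProfile()) is NOT_GIVEN
assert build_include(
    OpenAIModelProfile(openai_supports_encrypted_reasoning_content=True)
) == ['reasoning.encrypted_content']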

pydantic_ai_slim/pydantic_ai (OpenAI model profile):

@@ -41,6 +41,9 @@ class OpenAIModelProfile(ModelProfile):
     openai_chat_supports_web_search: bool = False
     """Whether the model supports web search in Chat Completions API."""

+    openai_supports_encrypted_reasoning_content: bool = False
+    """Whether the model supports including encrypted reasoning content in the response."""
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(
@@ -84,6 +87,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
         openai_chat_supports_web_search=supports_web_search,
+        openai_supports_encrypted_reasoning_content=is_reasoning_model,
     )
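
This hunk wires the new flag from is_reasoning_model, so reasoning-capable models opt in by default while everything else keeps the old behaviour. A rough sketch of how that default plays out; the real is_reasoning_model check lives elsewhere in pydantic-ai and is not shown in this diff, so the o-series name test below is purely an assumption for illustration.

from dataclasses import dataclass


@dataclass
class OpenAIModelProfile:  # minimal stand-in; the real profile has many more fields
    openai_supports_encrypted_reasoning_content: bool = False


def openai_model_profile(model_name: str) -> OpenAIModelProfile:
    # Assumption: o-series model names mark reasoning models; the library's
    # actual check may cover more model families.
    is_reasoning_model = model_name.startswith(('o1', 'o3', 'o4'))
    return OpenAIModelProfile(
        openai_supports_encrypted_reasoning_content=is_reasoning_model,
    )


assert not openai_model_profile('gpt-4o').openai_supports_encrypted_reasoning_content
assert openai_model_profile('o3-mini').openai_supports_encrypted_reasoning_content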

tests/models/cassettes/test_openai_responses (re-recorded cassette):

@@ -8,7 +8,7 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '105'
+      - '102'
       content-type:
       - application/json
       host:
@@ -18,8 +18,8 @@ interactions:
       input:
       - content: What is the capital of France?
         role: user
-      instructions: ''
       model: gpt-4o
+      stream: false
     uri: https://api.openai.com/v1/responses
   response:
     headers:
@@ -28,61 +28,71 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1183'
+      - '1385'
       content-type:
       - application/json
       openai-organization:
       - pydantic-28gund
       openai-processing-ms:
-      - '323'
+      - '1304'
+      openai-project:
+      - proj_dKobscVY9YJxeEaDJen54e3d
       openai-version:
       - '2020-10-01'
       strict-transport-security:
       - max-age=31536000; includeSubDomains; preload
       transfer-encoding:
       - chunked
     parsed_body:
-      created_at: 1743075630
+      background: false
+      created_at: 1757604033
       error: null
-      id: resp_67e5392e0fa48191b4c2e1ec82b33e7a006bc99764149f64
+      id: resp_68c2e8c147ac819491bcd667055eadbc02e845978fbbb592
       incomplete_details: null
-      instructions: ''
+      instructions: null
       max_output_tokens: null
+      max_tool_calls: null
       metadata: {}
       model: gpt-4o-2024-08-06
       object: response
       output:
       - content:
         - annotations: []
+          logprobs: []
           text: The capital of France is Paris.
           type: output_text
-        id: msg_67e5392e466c8191ad4ef0744bded009006bc99764149f64
+        id: msg_68c2e8c26114819489dae5b59fceb7bf02e845978fbbb592
         role: assistant
         status: completed
         type: message
       parallel_tool_calls: true
       previous_response_id: null
+      prompt_cache_key: null
       reasoning:
         effort: null
-        generate_summary: null
+        summary: null
+      safety_identifier: null
+      service_tier: default
       status: completed
       store: true
       temperature: 1.0
       text:
         format:
           type: text
+        verbosity: medium
       tool_choice: auto
       tools: []
+      top_logprobs: 0
       top_p: 1.0
       truncation: disabled
       usage:
-        input_tokens: 32
+        input_tokens: 14
         input_tokens_details:
           cached_tokens: 0
         output_tokens: 8
         output_tokens_details:
           reasoning_tokens: 0
-        total_tokens: 40
+        total_tokens: 22
       user: null
     status:
       code: 200
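
The cassette was re-recorded against the current Responses API, which is why the IDs, timestamps, token counts, and newer response fields differ; note that the gpt-4o request now carries no include parameter at all. A rough equivalent of the recorded interaction using the openai SDK directly (the actual test goes through pydantic-ai, so the exact request body and headers in the cassette are only approximated here):

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    # gpt-4o is not treated as a reasoning model, so no
    # include=['reasoning.encrypted_content'] is passed.
    response = await client.responses.create(
        model='gpt-4o',
        input=[{'role': 'user', 'content': 'What is the capital of France?'}],
    )
    print(response.output_text)  # "The capital of France is Paris."


if __name__ == '__main__':
    asyncio.run(main())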