
Commit 66ff387

Upgrade deps, fix tests (#987)
1 parent 23f2bf9 · commit 66ff387

File tree

2 files changed: +598 -519 lines


tests/otel_integrations/test_openai_agents.py

Lines changed: 52 additions & 13 deletions
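
Every hunk below makes the same change to the expected span attributes: after the dependency upgrade, `parallel_tool_calls` defaults to None instead of False, and `reasoning`, `metadata`, and `store` show up as extra settings keys. The following is a minimal sketch of the underlying reason, assuming the upgraded openai-agents SDK (imported as `agents`) exposes `ModelSettings` as a dataclass with these fields; the field names and defaults here are assumptions about that SDK version, not taken from this commit.

    # Sketch only: inspect the default ModelSettings that the instrumented spans
    # serialize. Assumes the upgraded openai-agents SDK, where ModelSettings is a
    # dataclass whose parallel_tool_calls default is None and which carries
    # reasoning/metadata/store fields.
    import dataclasses

    from agents import ModelSettings

    defaults = dataclasses.asdict(ModelSettings())

    # Upgraded default: no explicit opt-out of parallel tool calls.
    assert defaults['parallel_tool_calls'] is None

    # New keys that the updated snapshots now expect.
    for key in ('reasoning', 'metadata', 'store'):
        assert key in defaults and defaults[key] is None
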
@@ -452,9 +452,12 @@ def random_number() -> int:
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ced68228748191b31ea5d9172a7b4b',
@@ -742,9 +745,12 @@ def random_number() -> int:
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ced68425f48191a5fb0c2b61cb27dd',
@@ -1080,9 +1086,12 @@ async def zero_guardrail(_context: Any, _agent: Agent[Any], inp: Any) -> Guardra
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67cee263c6e0819184efdc0fe2624cc8',
@@ -1399,9 +1408,12 @@ async def test_chat_completions(exporter: TestExporter):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 'base_url': 'https://api.openai.com/v1/',
 },
 'usage': {'input_tokens': 11, 'output_tokens': 8},
@@ -1752,9 +1764,12 @@ async def test_responses_simple(exporter: TestExporter):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ceee053cdc81919f39173ee02cb88e',
@@ -1937,9 +1952,12 @@ async def test_responses_simple(exporter: TestExporter):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ceee0623ac819190454bc7af968938',
@@ -2182,9 +2200,12 @@ async def test_file_search(exporter: TestExporter):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ceff39d5e88191885004de76d26e43',
@@ -2446,9 +2467,12 @@ async def test_file_search(exporter: TestExporter):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'response': {
 'id': 'resp_67ceff3c84548191b620a2cf4c2e37f2',
@@ -2747,9 +2771,12 @@ def tool():
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'gen_ai.request.model': 'gpt-4o',
 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
@@ -2972,9 +2999,12 @@ def tool():
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'gen_ai.request.model': 'gpt-4o',
 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
@@ -3350,9 +3380,12 @@ async def test_voice_pipeline(exporter: TestExporter, vcr_allow_bytes: None):
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'gen_ai.request.model': 'gpt-4o',
 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
@@ -3860,9 +3893,12 @@ def random_number() -> int: # type: ignore
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'gen_ai.request.model': 'gpt-4o',
 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
@@ -4127,9 +4163,12 @@ def random_number() -> int: # type: ignore
 'frequency_penalty': None,
 'presence_penalty': None,
 'tool_choice': None,
-'parallel_tool_calls': False,
+'parallel_tool_calls': None,
 'truncation': None,
 'max_tokens': None,
+'reasoning': None,
+'metadata': None,
+'store': None,
 },
 'gen_ai.request.model': 'gpt-4o',
 'logfire.msg_template': 'Responses API with {gen_ai.request.model!r}',
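
For completeness, a small self-contained check (an illustration, not part of the commit) that scans the updated test file for the new snapshot pattern; the path is the one shown in this diff, and the script is assumed to run from the repository root.

    # Illustration only: count how the updated snapshots use the new defaults.
    import pathlib

    # Path as it appears in this diff; run from the repository root.
    source = pathlib.Path('tests/otel_integrations/test_openai_agents.py').read_text()

    old = source.count("'parallel_tool_calls': False,")
    new = source.count("'parallel_tool_calls': None,")
    print(f'old default remaining: {old}, upgraded default: {new}')

    for key in ('reasoning', 'metadata', 'store'):
        count = source.count(f"'{key}': None,")
        print(f"'{key}': None appears {count} times")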
