@@ -61,11 +61,12 @@ def test_data_analysis_workflow(self, **kwargs):
6161 # Create vector store and upload
6262 vector_store = openai_client .vector_stores .create (name = "SalesDataStore" )
6363 print (f"Vector store created (id: { vector_store .id } )" )
64-
64+
6565 from io import BytesIO
66- txt_file = BytesIO (txt_content .encode ('utf-8' ))
66+
67+ txt_file = BytesIO (txt_content .encode ("utf-8" ))
6768 txt_file .name = "sales_data.txt"
68-
69+
6970 file = openai_client .vector_stores .files .upload_and_poll (
7071 vector_store_id = vector_store .id ,
7172 file = txt_file ,
@@ -108,31 +109,31 @@ def test_data_analysis_workflow(self, **kwargs):
108109
109110 # Request analysis
110111 print ("\n Asking agent to analyze the sales data..." )
111-
112+
112113 response = openai_client .responses .create (
113114 input = "Analyze the sales data and calculate the total revenue for each product. Then save the results." ,
114115 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
115116 )
116-
117+
117118 print (f"Initial response completed (id: { response .id } )" )
118-
119+
119120 # Check if function was called
120121 function_calls_found = 0
121122 input_list : ResponseInputParam = []
122-
123+
123124 for item in response .output :
124125 if item .type == "function_call" :
125126 function_calls_found += 1
126127 print (f"Function call detected (id: { item .call_id } , name: { item .name } )" )
127-
128+
128129 assert item .name == "save_analysis_results"
129-
130+
130131 arguments = json .loads (item .arguments )
131132 print (f"Function arguments: { arguments } " )
132-
133+
133134 assert "summary" in arguments
134135 assert len (arguments ["summary" ]) > 20
135-
136+
136137 input_list .append (
137138 FunctionCallOutput (
138139 type = "function_call_output" ,
@@ -142,7 +143,7 @@ def test_data_analysis_workflow(self, **kwargs):
142143 )
143144
144145 assert function_calls_found > 0 , "Expected save_analysis_results function to be called"
145-
146+
146147 # Send function results back
147148 if input_list :
148149 response = openai_client .responses .create (
@@ -214,23 +215,23 @@ def test_empty_vector_store_handling(self, **kwargs):
214215
215216 # Request analysis of non-existent file
216217 print ("\n Asking agent to find non-existent data..." )
217-
218+
218219 response = openai_client .responses .create (
219220 input = "Find and analyze the quarterly sales report." ,
220221 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
221222 )
222-
223+
223224 response_text = response .output_text
224225 print (f"Response: '{ response_text [:200 ] if response_text else '(empty)' } ...'" )
225-
226+
226227 # Verify agent didn't crash
227228 assert response .id is not None , "Agent should return a valid response"
228229 assert len (response .output ) >= 0 , "Agent should return output items"
229-
230+
230231 # If there's text, it should be meaningful
231232 if response_text :
232233 assert len (response_text ) > 10 , "Non-empty response should be meaningful"
233-
234+
234235 print ("\n ✓ Agent handled missing data gracefully" )
235236
236237 # Teardown
@@ -267,11 +268,12 @@ def calculate_sum(numbers):
267268
268269 # Create vector store and upload
269270 vector_store = openai_client .vector_stores .create (name = "CodeStore" )
270-
271+
271272 from io import BytesIO
272- code_file = BytesIO (python_code .encode ('utf-8' ))
273+
274+ code_file = BytesIO (python_code .encode ("utf-8" ))
273275 code_file .name = "sample_code.py"
274-
276+
275277 file = openai_client .vector_stores .files .upload_and_poll (
276278 vector_store_id = vector_store .id ,
277279 file = code_file ,
@@ -313,22 +315,23 @@ def calculate_sum(numbers):
313315
314316 # Request code analysis
315317 print ("\n Asking agent to find and analyze the Python code..." )
316-
318+
317319 response = openai_client .responses .create (
318320 input = "Find the Python code file and tell me what the calculate_sum function does." ,
319321 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
320322 )
321-
323+
322324 response_text = response .output_text
323325 print (f"Response: { response_text [:300 ]} ..." )
324-
326+
325327 # Verify agent found and analyzed the code
326328 assert len (response_text ) > 50 , "Expected detailed analysis"
327-
329+
328330 response_lower = response_text .lower ()
329- assert any (keyword in response_lower for keyword in ["sum" , "calculate" , "function" , "numbers" , "code" , "python" ]), \
330- "Expected response to discuss the code"
331-
331+ assert any (
332+ keyword in response_lower for keyword in ["sum" , "calculate" , "function" , "numbers" , "code" , "python" ]
333+ ), "Expected response to discuss the code"
334+
332335 print ("\n ✓ Agent successfully found code file using File Search" )
333336
334337 # Teardown
@@ -344,7 +347,7 @@ def calculate_sum(numbers):
344347 def test_multi_turn_search_and_save_workflow (self , ** kwargs ):
345348 """
346349 Test multi-turn workflow: search documents, ask follow-ups, save findings.
347-
350+
348351 This tests:
349352 - File Search across multiple turns
350353 - Function calls interspersed with searches
@@ -383,13 +386,14 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
383386 # Create vector store and upload documents
384387 vector_store = openai_client .vector_stores .create (name = "ResearchStore" )
385388 print (f"Vector store created: { vector_store .id } " )
386-
389+
387390 from io import BytesIO
388- file1 = BytesIO (doc1_content .encode ('utf-8' ))
391+
392+ file1 = BytesIO (doc1_content .encode ("utf-8" ))
389393 file1 .name = "ml_healthcare.txt"
390- file2 = BytesIO (doc2_content .encode (' utf-8' ))
394+ file2 = BytesIO (doc2_content .encode ("utf-8" ))
391395 file2 .name = "ai_ethics.txt"
392-
396+
393397 uploaded1 = openai_client .vector_stores .files .upload_and_poll (
394398 vector_store_id = vector_store .id ,
395399 file = file1 ,
@@ -437,32 +441,32 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
437441 input = "What does the research say about machine learning in healthcare?" ,
438442 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
439443 )
440-
444+
441445 response_1_text = response_1 .output_text
442446 print (f"Response 1: { response_1_text [:200 ]} ..." )
443447 assert "95" in response_1_text or "accuracy" in response_1_text .lower ()
444-
448+
445449 # Turn 2: Follow-up for specifics
446450 print ("\n --- Turn 2: Follow-up for details ---" )
447451 response_2 = openai_client .responses .create (
448452 input = "What specific applications are mentioned?" ,
449453 previous_response_id = response_1 .id ,
450454 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
451455 )
452-
456+
453457 response_2_text = response_2 .output_text
454458 print (f"Response 2: { response_2_text [:200 ]} ..." )
455459 response_2_lower = response_2_text .lower ()
456460 assert any (keyword in response_2_lower for keyword in ["imaging" , "drug" , "risk" , "prediction" ])
457-
461+
458462 # Turn 3: Save the finding
459463 print ("\n --- Turn 3: Save finding ---" )
460464 response_3 = openai_client .responses .create (
461465 input = "Please save that finding about ML accuracy." ,
462466 previous_response_id = response_2 .id ,
463467 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
464468 )
465-
469+
466470 # Handle function call
467471 input_list : ResponseInputParam = []
468472 function_called = False
@@ -471,38 +475,38 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
471475 function_called = True
472476 print (f"Function called: { item .name } with args: { item .arguments } " )
473477 assert item .name == "save_finding"
474-
478+
475479 args = json .loads (item .arguments )
476480 assert "topic" in args and "finding" in args
477481 print (f" Topic: { args ['topic' ]} " )
478482 print (f" Finding: { args ['finding' ][:100 ]} ..." )
479-
483+
480484 input_list .append (
481485 FunctionCallOutput (
482486 type = "function_call_output" ,
483487 call_id = item .call_id ,
484488 output = json .dumps ({"status" : "saved" , "id" : "finding_001" }),
485489 )
486490 )
487-
491+
488492 assert function_called , "Expected save_finding to be called"
489-
493+
490494 # Send function result
491495 response_3 = openai_client .responses .create (
492496 input = input_list ,
493497 previous_response_id = response_3 .id ,
494498 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
495499 )
496500 print (f"Response 3: { response_3 .output_text [:150 ]} ..." )
497-
501+
498502 # Turn 4: Switch to different topic (AI ethics)
499503 print ("\n --- Turn 4: New search topic ---" )
500504 response_4 = openai_client .responses .create (
501505 input = "Now tell me about AI ethics concerns mentioned in the research." ,
502506 previous_response_id = response_3 .id ,
503507 extra_body = {"agent" : {"name" : agent .name , "type" : "agent_reference" }},
504508 )
505-
509+
506510 response_4_text = response_4 .output_text
507511 print (f"Response 4: { response_4_text [:200 ]} ..." )
508512 response_4_lower = response_4_text .lower ()
0 commit comments