
Commit 728e798

run black to fix formatting
1 parent 6e5bd03 commit 728e798

17 files changed (+341 / -308 lines)
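
The diffs below are mechanical: they come from running the Black formatter over the SDK test files (string-quote normalization, removal of trailing whitespace, and re-flowing of continued or over-long statements). A minimal sketch of that kind of rewrite, for illustration only; it is not part of the commit, and the 120-column line length is an assumption rather than a value read from the repo's configuration:

# Illustrative only: reproduces the style of change in this commit.
# Requires `pip install black`; line_length=120 is an assumed setting.
import black

SOURCE = (
    "txt_file = BytesIO(txt_content.encode('utf-8'))\n"
    "assert any(k in text for k in ['sum', 'calculate', 'function']), \\\n"
    "    'Expected response to discuss the code'\n"
)

# Black normalizes quotes to double quotes, removes the backslash
# continuation, and re-wraps anything longer than the configured width.
print(black.format_str(SOURCE, mode=black.Mode(line_length=120)))

Trailing-whitespace removal accounts for most of the bare -/+ pairs in the hunks that follow.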

sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py

Lines changed: 6 additions & 6 deletions
@@ -69,17 +69,17 @@ def test_calculate_and_save(self, **kwargs):
            description="Agent with Code Interpreter and Function Tool.",
        )
        print(f"Agent created (id: {agent.id})")
-
+
        # Use the agent
        response = openai_client.responses.create(
            input="Calculate 5 + 3 and save the result.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
        print(f"Response received (id: {response.id})")
-
+
        assert response.id is not None
        print("✓ Code Interpreter + Function Tool works!")
-
+
        # Cleanup
        project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)

@@ -129,16 +129,16 @@ def test_generate_data_and_report(self, **kwargs):
            description="Agent for data generation and reporting.",
        )
        print(f"Agent created (id: {agent.id})")
-
+
        # Request data generation and report
        response = openai_client.responses.create(
            input="Generate a list of 10 random numbers between 1 and 100, calculate their average, and create a report.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        print(f"Response received (id: {response.id})")
        assert response.id is not None
        print("✓ Data generation and reporting works!")
-
+
        # Cleanup
        project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)

sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py

Lines changed: 19 additions & 16 deletions
@@ -41,11 +41,12 @@ def test_find_and_analyze_data(self, **kwargs):
        # Create data file
        txt_content = "Sample data: 10, 20, 30, 40, 50"
        vector_store = openai_client.vector_stores.create(name="DataStore")
-
+
        from io import BytesIO
-        txt_file = BytesIO(txt_content.encode('utf-8'))
+
+        txt_file = BytesIO(txt_content.encode("utf-8"))
        txt_file.name = "data.txt"
-
+
        file = openai_client.vector_stores.files.upload_and_poll(
            vector_store_id=vector_store.id,
            file=txt_file,

@@ -66,18 +67,18 @@ def test_find_and_analyze_data(self, **kwargs):
            description="Agent with File Search and Code Interpreter.",
        )
        print(f"Agent created (id: {agent.id})")
-
+
        # Use the agent
        response = openai_client.responses.create(
            input="Find the data file and calculate the average.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
        print(f"Response received (id: {response.id})")
-
+
        assert response.id is not None
        assert len(response.output_text) > 20
        print("✓ File Search + Code Interpreter works!")
-
+
        # Cleanup
        project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
        openai_client.vector_stores.delete(vector_store.id)

@@ -109,11 +110,12 @@ def test_analyze_code_file(self, **kwargs):
        """

        vector_store = openai_client.vector_stores.create(name="CodeAnalysisStore")
-
+
        from io import BytesIO
-        code_file = BytesIO(python_code.encode('utf-8'))
+
+        code_file = BytesIO(python_code.encode("utf-8"))
        code_file.name = "fibonacci.py"
-
+
        file = openai_client.vector_stores.files.upload_and_poll(
            vector_store_id=vector_store.id,
            file=code_file,

@@ -134,23 +136,24 @@ def test_analyze_code_file(self, **kwargs):
            description="Agent for code analysis.",
        )
        print(f"Agent created (id: {agent.id})")
-
+
        # Request analysis
        response = openai_client.responses.create(
            input="Find the fibonacci code and explain what it does. What is the computational complexity?",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_text = response.output_text
        print(f"Response: {response_text[:300]}...")
-
+
        assert len(response_text) > 50
        response_lower = response_text.lower()
-        assert any(keyword in response_lower for keyword in ["fibonacci", "recursive", "complexity", "exponential"]), \
-            "Expected analysis of fibonacci algorithm"
-
+        assert any(
+            keyword in response_lower for keyword in ["fibonacci", "recursive", "complexity", "exponential"]
+        ), "Expected analysis of fibonacci algorithm"
+
        print("✓ Code file analysis completed")
-
+
        # Cleanup
        project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
        openai_client.vector_stores.delete(vector_store.id)

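For orientation, the file-search tests above all share the same setup: build an in-memory file, upload it to a vector store, and point the agent at that store. A minimal sketch of that upload step, assuming an already-constructed OpenAI-compatible client bound to the project (shown here as openai_client, which the test fixtures provide); not a complete test:

from io import BytesIO

# In-memory file; the .name attribute becomes the filename recorded by the service.
payload = BytesIO("Sample data: 10, 20, 30, 40, 50".encode("utf-8"))
payload.name = "data.txt"

# `openai_client` is assumed to come from the test fixtures (not shown in this diff).
vector_store = openai_client.vector_stores.create(name="DataStore")
uploaded = openai_client.vector_stores.files.upload_and_poll(
    vector_store_id=vector_store.id,
    file=payload,
)
print(f"Uploaded {uploaded.id} to vector store {vector_store.id}")

# Teardown mirrors the tests' cleanup.
openai_client.vector_stores.delete(vector_store.id)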
sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py

Lines changed: 47 additions & 43 deletions
@@ -61,11 +61,12 @@ def test_data_analysis_workflow(self, **kwargs):
        # Create vector store and upload
        vector_store = openai_client.vector_stores.create(name="SalesDataStore")
        print(f"Vector store created (id: {vector_store.id})")
-
+
        from io import BytesIO
-        txt_file = BytesIO(txt_content.encode('utf-8'))
+
+        txt_file = BytesIO(txt_content.encode("utf-8"))
        txt_file.name = "sales_data.txt"
-
+
        file = openai_client.vector_stores.files.upload_and_poll(
            vector_store_id=vector_store.id,
            file=txt_file,

@@ -108,31 +109,31 @@ def test_data_analysis_workflow(self, **kwargs):

        # Request analysis
        print("\nAsking agent to analyze the sales data...")
-
+
        response = openai_client.responses.create(
            input="Analyze the sales data and calculate the total revenue for each product. Then save the results.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        print(f"Initial response completed (id: {response.id})")
-
+
        # Check if function was called
        function_calls_found = 0
        input_list: ResponseInputParam = []
-
+
        for item in response.output:
            if item.type == "function_call":
                function_calls_found += 1
                print(f"Function call detected (id: {item.call_id}, name: {item.name})")
-
+
                assert item.name == "save_analysis_results"
-
+
                arguments = json.loads(item.arguments)
                print(f"Function arguments: {arguments}")
-
+
                assert "summary" in arguments
                assert len(arguments["summary"]) > 20
-
+
                input_list.append(
                    FunctionCallOutput(
                        type="function_call_output",

@@ -142,7 +143,7 @@ def test_data_analysis_workflow(self, **kwargs):
                )

        assert function_calls_found > 0, "Expected save_analysis_results function to be called"
-
+
        # Send function results back
        if input_list:
            response = openai_client.responses.create(

@@ -214,23 +215,23 @@ def test_empty_vector_store_handling(self, **kwargs):

        # Request analysis of non-existent file
        print("\nAsking agent to find non-existent data...")
-
+
        response = openai_client.responses.create(
            input="Find and analyze the quarterly sales report.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_text = response.output_text
        print(f"Response: '{response_text[:200] if response_text else '(empty)'}...'")
-
+
        # Verify agent didn't crash
        assert response.id is not None, "Agent should return a valid response"
        assert len(response.output) >= 0, "Agent should return output items"
-
+
        # If there's text, it should be meaningful
        if response_text:
            assert len(response_text) > 10, "Non-empty response should be meaningful"
-
+
        print("\n✓ Agent handled missing data gracefully")

        # Teardown

@@ -267,11 +268,12 @@ def calculate_sum(numbers):

        # Create vector store and upload
        vector_store = openai_client.vector_stores.create(name="CodeStore")
-
+
        from io import BytesIO
-        code_file = BytesIO(python_code.encode('utf-8'))
+
+        code_file = BytesIO(python_code.encode("utf-8"))
        code_file.name = "sample_code.py"
-
+
        file = openai_client.vector_stores.files.upload_and_poll(
            vector_store_id=vector_store.id,
            file=code_file,

@@ -313,22 +315,23 @@ def calculate_sum(numbers):

        # Request code analysis
        print("\nAsking agent to find and analyze the Python code...")
-
+
        response = openai_client.responses.create(
            input="Find the Python code file and tell me what the calculate_sum function does.",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_text = response.output_text
        print(f"Response: {response_text[:300]}...")
-
+
        # Verify agent found and analyzed the code
        assert len(response_text) > 50, "Expected detailed analysis"
-
+
        response_lower = response_text.lower()
-        assert any(keyword in response_lower for keyword in ["sum", "calculate", "function", "numbers", "code", "python"]), \
-            "Expected response to discuss the code"
-
+        assert any(
+            keyword in response_lower for keyword in ["sum", "calculate", "function", "numbers", "code", "python"]
+        ), "Expected response to discuss the code"
+
        print("\n✓ Agent successfully found code file using File Search")

        # Teardown

@@ -344,7 +347,7 @@ def calculate_sum(numbers):
    def test_multi_turn_search_and_save_workflow(self, **kwargs):
        """
        Test multi-turn workflow: search documents, ask follow-ups, save findings.
-
+
        This tests:
        - File Search across multiple turns
        - Function calls interspersed with searches

@@ -383,13 +386,14 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
        # Create vector store and upload documents
        vector_store = openai_client.vector_stores.create(name="ResearchStore")
        print(f"Vector store created: {vector_store.id}")
-
+
        from io import BytesIO
-        file1 = BytesIO(doc1_content.encode('utf-8'))
+
+        file1 = BytesIO(doc1_content.encode("utf-8"))
        file1.name = "ml_healthcare.txt"
-        file2 = BytesIO(doc2_content.encode('utf-8'))
+        file2 = BytesIO(doc2_content.encode("utf-8"))
        file2.name = "ai_ethics.txt"
-
+
        uploaded1 = openai_client.vector_stores.files.upload_and_poll(
            vector_store_id=vector_store.id,
            file=file1,

@@ -437,32 +441,32 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
            input="What does the research say about machine learning in healthcare?",
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_1_text = response_1.output_text
        print(f"Response 1: {response_1_text[:200]}...")
        assert "95" in response_1_text or "accuracy" in response_1_text.lower()
-
+
        # Turn 2: Follow-up for specifics
        print("\n--- Turn 2: Follow-up for details ---")
        response_2 = openai_client.responses.create(
            input="What specific applications are mentioned?",
            previous_response_id=response_1.id,
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_2_text = response_2.output_text
        print(f"Response 2: {response_2_text[:200]}...")
        response_2_lower = response_2_text.lower()
        assert any(keyword in response_2_lower for keyword in ["imaging", "drug", "risk", "prediction"])
-
+
        # Turn 3: Save the finding
        print("\n--- Turn 3: Save finding ---")
        response_3 = openai_client.responses.create(
            input="Please save that finding about ML accuracy.",
            previous_response_id=response_2.id,
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        # Handle function call
        input_list: ResponseInputParam = []
        function_called = False

@@ -471,38 +475,38 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs):
                function_called = True
                print(f"Function called: {item.name} with args: {item.arguments}")
                assert item.name == "save_finding"
-
+
                args = json.loads(item.arguments)
                assert "topic" in args and "finding" in args
                print(f" Topic: {args['topic']}")
                print(f" Finding: {args['finding'][:100]}...")
-
+
                input_list.append(
                    FunctionCallOutput(
                        type="function_call_output",
                        call_id=item.call_id,
                        output=json.dumps({"status": "saved", "id": "finding_001"}),
                    )
                )
-
+
        assert function_called, "Expected save_finding to be called"
-
+
        # Send function result
        response_3 = openai_client.responses.create(
            input=input_list,
            previous_response_id=response_3.id,
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
        print(f"Response 3: {response_3.output_text[:150]}...")
-
+
        # Turn 4: Switch to different topic (AI ethics)
        print("\n--- Turn 4: New search topic ---")
        response_4 = openai_client.responses.create(
            input="Now tell me about AI ethics concerns mentioned in the research.",
            previous_response_id=response_3.id,
            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
        )
-
+
        response_4_text = response_4.output_text
        print(f"Response 4: {response_4_text[:200]}...")
        response_4_lower = response_4_text.lower()

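The multi-turn test above also shows the function-call round trip these agents rely on: the model emits function_call items, the test runs the tool locally, and the outputs are sent back with previous_response_id so the conversation can continue. A condensed sketch of that loop, assuming openai_client and agent already exist; the tests build the output items with the SDK's FunctionCallOutput typed dict, while plain dicts of the same shape are used here, and the saved-record payload is hypothetical:

import json

agent_ref = {"agent": {"name": agent.name, "type": "agent_reference"}}

# Ask for something that should trigger the declared function tool.
response = openai_client.responses.create(
    input="Please save that finding about ML accuracy.",
    extra_body=agent_ref,
)

# Turn every function_call item into a function_call_output item.
tool_outputs = []
for item in response.output:
    if item.type == "function_call":
        args = json.loads(item.arguments)  # arguments arrive as a JSON string
        result = {"status": "saved", "id": "finding_001"}  # stand-in for the real tool
        tool_outputs.append(
            {
                "type": "function_call_output",
                "call_id": item.call_id,  # ties the output back to the originating call
                "output": json.dumps(result),
            }
        )

# Send the tool outputs back, chaining on the previous response.
if tool_outputs:
    followup = openai_client.responses.create(
        input=tool_outputs,
        previous_response_id=response.id,
        extra_body=agent_ref,
    )
    print(followup.output_text)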