# pylint: disable=too-many-lines,line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# cSpell:disable
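"""Async tests for agents that use the File Search tool against vector stores."""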

import os
from io import BytesIO
from test_base import TestBase, servicePreparer
from devtools_testutils.aio import recorded_by_proxy_async
from devtools_testutils import RecordedTransport
from azure.ai.projects.models import PromptAgentDefinition, FileSearchTool


class TestAgentFileSearchAsync(TestBase):

    @servicePreparer()
    @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX)
    async def test_agent_file_search_async(self, **kwargs):
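        """
        Test File Search with a file uploaded from disk (async version).

        This test verifies that an agent configured with the File Search tool can
        answer a question about a document uploaded to a vector store.
        """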

        model = self.test_agents_params["model_deployment_name"]

        async with (
            self.create_async_client(operation_group="agents", **kwargs) as project_client,
            project_client.get_openai_client() as openai_client,
        ):
            # Get the path to the test file
            asset_file_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "../../../samples/agents/assets/product_info.md")
            )

            assert os.path.exists(asset_file_path), f"Test file not found at: {asset_file_path}"
            print(f"Using test file: {asset_file_path}")

            # Create vector store for file search
            vector_store = await openai_client.vector_stores.create(name="ProductInfoStore")
            print(f"Vector store created (id: {vector_store.id})")
            assert vector_store.id

            # Upload file to vector store
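            # upload_and_poll waits for the service to finish processing the file,
            # so the document is searchable as soon as the call returns.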
            with open(asset_file_path, "rb") as f:
                file = await openai_client.vector_stores.files.upload_and_poll(
                    vector_store_id=vector_store.id,
                    file=f,
                )

            print(f"File uploaded (id: {file.id}, status: {file.status})")
            assert file.id
            assert file.status == "completed", f"Expected file status 'completed', got '{file.status}'"

            # Create agent with file search tool
            agent_name = "file-search-agent"
            agent = await project_client.agents.create_version(
                agent_name=agent_name,
                definition=PromptAgentDefinition(
                    model=model,
                    instructions="You are a helpful assistant that can search through uploaded documents to answer questions.",
                    tools=[FileSearchTool(vector_store_ids=[vector_store.id])],
                ),
                description="Agent for testing file search capabilities.",
            )
            self._validate_agent_version(agent, expected_name=agent_name)

            # Ask a question about the uploaded document
            print("\nAsking agent about the product information...")

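            # extra_body references the agent by name ("agent_reference"), so the
            # response is generated using the agent's instructions and File Search tool.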
            response = await openai_client.responses.create(
                input="What products are mentioned in the document?",
                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
            )

            print(f"Response completed (id: {response.id})")
            assert response.id
            assert response.output is not None
            assert len(response.output) > 0

            # Get the response text
            response_text = response.output_text
            print(f"\nAgent's response: {response_text[:300]}...")

            # Verify we got a meaningful response
            assert len(response_text) > 50, "Expected a substantial response from the agent"

            # The response should mention finding information (indicating file search was used)
            # We can't assert exact product names without knowing the file content,
            # but we can verify the agent provided an answer
            print("\n✓ Agent successfully used file search tool to answer question from uploaded document")

            # Teardown
            print("\nCleaning up...")
            await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
            print("Agent deleted")

            await openai_client.vector_stores.delete(vector_store.id)
            print("Vector store deleted")

    @servicePreparer()
    @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX)
    async def test_agent_file_search_multi_turn_conversation_async(self, **kwargs):
        """
        Test multi-turn conversation with File Search (async version).

        This test verifies that an agent can maintain context across multiple turns
        while using File Search to answer follow-up questions.
        """

        model = self.test_agents_params["model_deployment_name"]

        async with (
            self.create_async_client(operation_group="agents", **kwargs) as project_client,
            project_client.get_openai_client() as openai_client,
        ):
            # Create a document with information about products
            product_info = """Product Catalog:

Widget A:
- Price: $150
- Category: Electronics
- Stock: 50 units
- Rating: 4.5/5 stars

Widget B:
- Price: $220
- Category: Electronics
- Stock: 30 units
- Rating: 4.8/5 stars

Widget C:
- Price: $95
- Category: Home & Garden
- Stock: 100 units
- Rating: 4.2/5 stars
"""

            # Create vector store and upload document
            vector_store = await openai_client.vector_stores.create(name="ProductCatalog")
            print(f"Vector store created: {vector_store.id}")

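            # Build the document in memory; the client reads the stream's `name`
            # attribute to use as the uploaded filename.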
            product_file = BytesIO(product_info.encode("utf-8"))
            product_file.name = "products.txt"

            file = await openai_client.vector_stores.files.upload_and_poll(
                vector_store_id=vector_store.id,
                file=product_file,
            )
            print(f"Product catalog uploaded: {file.id}")

            # Create agent with File Search
            agent = await project_client.agents.create_version(
                agent_name="product-catalog-agent",
                definition=PromptAgentDefinition(
                    model=model,
                    instructions="You are a product information assistant. Use file search to answer questions about products.",
                    tools=[FileSearchTool(vector_store_ids=[vector_store.id])],
                ),
                description="Agent for multi-turn product queries.",
            )
            print(f"Agent created: {agent.id}")

            # Turn 1: Ask about price
            print("\n--- Turn 1: Initial query ---")
            response_1 = await openai_client.responses.create(
                input="What is the price of Widget B?",
                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
            )

            response_1_text = response_1.output_text
            print(f"Response 1: {response_1_text[:200]}...")
            assert "$220" in response_1_text or "220" in response_1_text, "Response should mention Widget B's price"

            # Turn 2: Follow-up question (requires context from turn 1)
            print("\n--- Turn 2: Follow-up query (testing context retention) ---")
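            # previous_response_id chains this turn to the last one so the agent
            # can resolve "its" to Widget B from turn 1.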
            response_2 = await openai_client.responses.create(
                input="What about its stock level?",
                previous_response_id=response_1.id,
                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
            )

            response_2_text = response_2.output_text
            print(f"Response 2: {response_2_text[:200]}...")
            assert (
                "30" in response_2_text or "thirty" in response_2_text.lower()
            ), "Response should mention Widget B's stock (30 units)"

            # Turn 3: Another follow-up (compare with different product)
            print("\n--- Turn 3: Comparison query ---")
            response_3 = await openai_client.responses.create(
                input="How does that compare to Widget A's stock?",
                previous_response_id=response_2.id,
                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
            )

            response_3_text = response_3.output_text
            print(f"Response 3: {response_3_text[:200]}...")
            assert (
                "50" in response_3_text or "fifty" in response_3_text.lower()
            ), "Response should mention Widget A's stock (50 units)"

            # Turn 4: New topic (testing topic switching)
            print("\n--- Turn 4: Topic switch ---")
            response_4 = await openai_client.responses.create(
                input="Which widget has the highest rating?",
                previous_response_id=response_3.id,
                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
            )

            response_4_text = response_4.output_text
            print(f"Response 4: {response_4_text[:200]}...")
            assert (
                "widget b" in response_4_text.lower() or "4.8" in response_4_text
            ), "Response should identify Widget B as highest rated (4.8/5)"

            print("\n✓ Multi-turn conversation successful!")
            print(" - Context maintained across turns")
            print(" - Follow-up questions handled correctly")
            print(" - Topic switching works")

            # Cleanup
            await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
            await openai_client.vector_stores.delete(vector_store.id)
            print("Cleanup completed")