diff --git a/sdk/ai/azure-ai-agents/CHANGELOG.md b/sdk/ai/azure-ai-agents/CHANGELOG.md index 1d35690931eb..0eabaf89afe0 100644 --- a/sdk/ai/azure-ai-agents/CHANGELOG.md +++ b/sdk/ai/azure-ai-agents/CHANGELOG.md @@ -7,6 +7,8 @@ ### Features Added +- Add `RunStepDetailsActivity`, describing MCP function parameters. + ### Bugs Fixed ### Sample updates diff --git a/sdk/ai/azure-ai-agents/README.md b/sdk/ai/azure-ai-agents/README.md index e7898de986d9..4c724661c0b8 100644 --- a/sdk/ai/azure-ai-agents/README.md +++ b/sdk/ai/azure-ai-agents/README.md @@ -474,7 +474,9 @@ The tool approval flow looks like this: # Create and process agent run in thread with MCP tools mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement -run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) +run = agents_client.runs.create( + thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources +) print(f"Created run, ID: {run.id}") while run.status in ["queued", "in_progress", "requires_action"]: @@ -1083,8 +1085,7 @@ for run_step in agents_client.run_steps.list(thread_id=thread.id, run_id=run.id, for tool_call in run_step.step_details.tool_calls: if isinstance(tool_call, RunStepConnectedAgentToolCall): print( - f"\tAgent: {tool_call.connected_agent.name} " - f"query: {tool_call.connected_agent.arguments} ", + f"\tAgent: {tool_call.connected_agent.name} " f"query: {tool_call.connected_agent.arguments} ", f"output: {tool_call.connected_agent.output}", ) ``` diff --git a/sdk/ai/azure-ai-agents/apiview-properties.json b/sdk/ai/azure-ai-agents/apiview-properties.json index 91d949c6d0c4..ee40bdfb9621 100644 --- a/sdk/ai/azure-ai-agents/apiview-properties.json +++ b/sdk/ai/azure-ai-agents/apiview-properties.json @@ -1,6 +1,8 @@ { "CrossLanguagePackageId": "Azure.AI.Agents", "CrossLanguageDefinitionId": { + "azure.ai.agents.models.ActivityFunctionDefinition": 
"Azure.AI.Agents.ActivityFunctionDefinition", + "azure.ai.agents.models.ActivityFunctionParameters": "Azure.AI.Agents.ActivityFunctionParameters", "azure.ai.agents.models.Agent": "Azure.AI.Agents.Agent", "azure.ai.agents.models.AgentErrorDetail": "Azure.AI.Agents.AgentErrorDetail", "azure.ai.agents.models.AgentsNamedToolChoice": "Azure.AI.Agents.AgentsNamedToolChoice", @@ -42,6 +44,7 @@ "azure.ai.agents.models.FileSearchToolDefinition": "Azure.AI.Agents.FileSearchToolDefinition", "azure.ai.agents.models.FileSearchToolDefinitionDetails": "Azure.AI.Agents.FileSearchToolDefinitionDetails", "azure.ai.agents.models.FileSearchToolResource": "Azure.AI.Agents.FileSearchToolResource", + "azure.ai.agents.models.FunctionArgument": "Azure.AI.Agents.FunctionArgument", "azure.ai.agents.models.FunctionDefinition": "Azure.AI.Agents.FunctionDefinition", "azure.ai.agents.models.FunctionName": "Azure.AI.Agents.FunctionName", "azure.ai.agents.models.FunctionToolDefinition": "Azure.AI.Agents.FunctionToolDefinition", @@ -103,6 +106,8 @@ "azure.ai.agents.models.RunCompletionUsage": "Azure.AI.Agents.RunCompletionUsage", "azure.ai.agents.models.RunError": "Azure.AI.Agents.RunError", "azure.ai.agents.models.RunStep": "Azure.AI.Agents.RunStep", + "azure.ai.agents.models.RunStepDetails": "Azure.AI.Agents.RunStepDetails", + "azure.ai.agents.models.RunStepActivityDetails": "Azure.AI.Agents.RunStepActivityDetails", "azure.ai.agents.models.RunStepToolCall": "Azure.AI.Agents.RunStepToolCall", "azure.ai.agents.models.RunStepAzureAISearchToolCall": "Azure.AI.Agents.RunStepAzureAISearchToolCall", "azure.ai.agents.models.RunStepBingCustomSearchToolCall": "Azure.AI.Agents.RunStepBingCustomSearchToolCall", @@ -143,7 +148,7 @@ "azure.ai.agents.models.RunStepDeltaOpenAPIObject": "Azure.AI.Agents.RunStepDeltaOpenAPIObject", "azure.ai.agents.models.RunStepDeltaOpenAPIToolCall": "Azure.AI.Agents.RunStepDeltaOpenAPIToolCall", "azure.ai.agents.models.RunStepDeltaToolCallObject": 
"Azure.AI.Agents.RunStepDeltaToolCallObject", - "azure.ai.agents.models.RunStepDetails": "Azure.AI.Agents.RunStepDetails", + "azure.ai.agents.models.RunStepDetailsActivity": "Azure.AI.Agents.RunStepDetailsActivity", "azure.ai.agents.models.RunStepError": "Azure.AI.Agents.RunStepError", "azure.ai.agents.models.RunStepFileSearchToolCall": "Azure.AI.Agents.RunStepFileSearchToolCall", "azure.ai.agents.models.RunStepFileSearchToolCallResult": "Azure.AI.Agents.RunStepFileSearchToolCallResult", diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py b/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py index aaa6692b2346..e9ac32aaf9d4 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/_utils/model_base.py @@ -29,6 +29,7 @@ from azure.core import CaseInsensitiveEnumMeta from azure.core.pipeline import PipelineResponse from azure.core.serialization import _Null +from azure.core.rest import HttpResponse _LOGGER = logging.getLogger(__name__) @@ -940,13 +941,13 @@ def _deserialize( def _failsafe_deserialize( deserializer: typing.Any, - value: typing.Any, + response: HttpResponse, module: typing.Optional[str] = None, rf: typing.Optional["_RestField"] = None, format: typing.Optional[str] = None, ) -> typing.Any: try: - return _deserialize(deserializer, value, module, rf, format) + return _deserialize(deserializer, response.json(), module, rf, format) except DeserializationError: _LOGGER.warning( "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True @@ -956,10 +957,10 @@ def _failsafe_deserialize( def _failsafe_deserialize_xml( deserializer: typing.Any, - value: typing.Any, + response: HttpResponse, ) -> typing.Any: try: - return _deserialize_xml(deserializer, value) + return _deserialize_xml(deserializer, response.text()) except DeserializationError: _LOGGER.warning( "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py index cbf041efa9a5..ff1adf0233de 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/aio/operations/_operations.py @@ -250,7 +250,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -337,7 +337,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -392,7 +392,7 @@ async def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -556,7 +556,7 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if 
_stream: @@ -617,7 +617,7 @@ async def _delete_thread(self, thread_id: str, **kwargs: Any) -> _models._models except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -825,7 +825,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -977,7 +977,7 @@ async def get(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.T except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1138,7 +1138,7 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1531,7 +1531,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise 
HttpResponseError(response=response, model=error) if _stream: @@ -1622,7 +1622,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -1680,7 +1680,7 @@ async def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Threa except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1841,7 +1841,7 @@ async def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2014,7 +2014,7 @@ async def submit_tool_outputs( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2078,7 +2078,7 @@ async def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Th except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) 
+ error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2177,7 +2177,7 @@ async def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2279,7 +2279,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2355,7 +2355,7 @@ async def list( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2429,7 +2429,7 @@ async def _upload_file( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2490,7 +2490,7 @@ async def _delete_file(self, file_id: str, **kwargs: Any) -> _models._models.Fil except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + 
error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2553,7 +2553,7 @@ async def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2614,7 +2614,7 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) deserialized = response.iter_bytes() @@ -2716,7 +2716,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2890,7 +2890,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2951,7 +2951,7 @@ async def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: except (StreamConsumedError, StreamClosedError): pass 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3113,7 +3113,7 @@ async def modify( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3177,7 +3177,7 @@ async def _delete_vector_store( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3293,7 +3293,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3445,7 +3445,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3509,7 +3509,7 @@ async def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _model except (StreamConsumedError, 
StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3577,7 +3577,7 @@ async def _delete_vector_store_file( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3755,7 +3755,7 @@ async def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3819,7 +3819,7 @@ async def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _mode except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3884,7 +3884,7 @@ async def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _m except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3984,7 +3984,7 
@@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4215,7 +4215,7 @@ async def create_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4302,7 +4302,7 @@ async def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4357,7 +4357,7 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4604,7 +4604,7 @@ async def update_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if 
_stream: @@ -4665,7 +4665,7 @@ async def _delete_agent(self, agent_id: str, **kwargs: Any) -> _models._models.A except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4985,7 +4985,7 @@ async def create_thread_and_run( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py index 580ed0213375..a05e78ae67c5 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py @@ -15,6 +15,8 @@ from ._models import ( # type: ignore AISearchIndexResource, + ActivityFunctionDefinition, + ActivityFunctionParameters, Agent, AgentErrorDetail, AgentThread, @@ -54,6 +56,7 @@ FileSearchToolDefinition, FileSearchToolDefinitionDetails, FileSearchToolResource, + FunctionArgument, FunctionDefinition, FunctionName, FunctionToolDefinition, @@ -115,6 +118,7 @@ RunCompletionUsage, RunError, RunStep, + RunStepActivityDetails, RunStepAzureAISearchToolCall, RunStepBingCustomSearchToolCall, RunStepBingGroundingToolCall, @@ -155,6 +159,7 @@ RunStepDeltaToolCall, RunStepDeltaToolCallObject, RunStepDetails, + RunStepDetailsActivity, RunStepError, RunStepFileSearchToolCall, RunStepFileSearchToolCallResult, @@ -247,6 +252,8 @@ __all__ = [ "AISearchIndexResource", + "ActivityFunctionDefinition", + "ActivityFunctionParameters", "Agent", 
"AgentErrorDetail", "AgentThread", @@ -286,6 +293,7 @@ "FileSearchToolDefinition", "FileSearchToolDefinitionDetails", "FileSearchToolResource", + "FunctionArgument", "FunctionDefinition", "FunctionName", "FunctionToolDefinition", @@ -347,6 +355,7 @@ "RunCompletionUsage", "RunError", "RunStep", + "RunStepActivityDetails", "RunStepAzureAISearchToolCall", "RunStepBingCustomSearchToolCall", "RunStepBingGroundingToolCall", @@ -387,6 +396,7 @@ "RunStepDeltaToolCall", "RunStepDeltaToolCallObject", "RunStepDetails", + "RunStepDetailsActivity", "RunStepError", "RunStepFileSearchToolCall", "RunStepFileSearchToolCallResult", diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py index d62db671633e..ae1f9984e439 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_enums.py @@ -401,6 +401,8 @@ class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Represents a run step to create a message.""" TOOL_CALLS = "tool_calls" """Represents a run step that calls tools.""" + ACTIVITIES = "activities" + """Represents a run step with activities information.""" class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py index a71811f6d4c4..7aa7ca6468cf 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py @@ -25,6 +25,91 @@ from .. import _types, models as _models +class ActivityFunctionDefinition(_Model): + """The activity definition information for a function. + + :ivar description: A description of what the function does, used by the model to choose when + and how to call the function. + :vartype description: str + :ivar parameters: The parameters the functions accepts, described as a JSON Schema object. + Required. 
+ :vartype parameters: ~azure.ai.agents.models.ActivityFunctionParameters + """ + + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A description of what the function does, used by the model to choose when and how to call the + function.""" + parameters: "_models.ActivityFunctionParameters" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The parameters the functions accepts, described as a JSON Schema object. Required.""" + + @overload + def __init__( + self, + *, + parameters: "_models.ActivityFunctionParameters", + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ActivityFunctionParameters(_Model): + """The parameters used for activity function definition. + + :ivar type: The parameter type, it is always object. Required. Default value is "object". + :vartype type: str + :ivar properties: The dictionary of function arguments. Required. + :vartype properties: dict[str, ~azure.ai.agents.models.FunctionArgument] + :ivar required: The list of the required parameters. Required. + :vartype required: list[str] + :ivar additional_properties: If true the function has additional parameters. + :vartype additional_properties: bool + """ + + type: Literal["object"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The parameter type, it is always object. Required. Default value is \"object\".""" + properties: Dict[str, "_models.FunctionArgument"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The dictionary of function arguments. 
Required.""" + required: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of the required parameters. Required.""" + additional_properties: Optional[bool] = rest_field( + name="additionalProperties", visibility=["read", "create", "update", "delete", "query"] + ) + """If true the function has additional parameters.""" + + @overload + def __init__( + self, + *, + properties: Dict[str, "_models.FunctionArgument"], + required: List[str], + additional_properties: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.type: Literal["object"] = "object" + + class Agent(_Model): """Represents an agent that can call the model and use tools. @@ -1791,6 +1876,39 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class FunctionArgument(_Model): + """The function argument and description. + + :ivar type: The type of an argument, for example 'string' or 'number'. Required. + :vartype type: str + :ivar description: The argument description. + :vartype description: str + """ + + type: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of an argument, for example 'string' or 'number'. Required.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The argument description.""" + + @overload + def __init__( + self, + *, + type: str, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class FunctionDefinition(_Model): """The input definition information for a function. @@ -4021,7 +4139,7 @@ class RunStep(_Model): "thread.run.step". :vartype object: str :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. - Known values are: "message_creation" and "tool_calls". + Known values are: "message_creation", "tool_calls", and "activities". :vartype type: str or ~azure.ai.agents.models.RunStepType :ivar agent_id: The ID of the agent associated with the run step. Required. :vartype agent_id: str @@ -4066,7 +4184,7 @@ class RunStep(_Model): \"thread.run.step\".""" type: Union[str, "_models.RunStepType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of run step, which can be either message_creation or tool_calls. Required. Known - values are: \"message_creation\" and \"tool_calls\".""" + values are: \"message_creation\", \"tool_calls\", and \"activities\".""" agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) """The ID of the agent associated with the run step. Required.""" thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -4143,6 +4261,76 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["thread.run.step"] = "thread.run.step" +class RunStepDetails(_Model): + """An abstract representation of the details for a run step. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepActivityDetails, RunStepMessageCreationDetails, RunStepToolCallDetails + + :ivar type: The object type. Required. Known values are: "message_creation", "tool_calls", and + "activities". 
+ :vartype type: str or ~azure.ai.agents.models.RunStepType + """ + + __mapping__: Dict[str, _Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"message_creation\", \"tool_calls\", and + \"activities\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepActivityDetails(RunStepDetails, discriminator="activities"): + """The detailed information associated with a run step activities. + + :ivar type: The object type, which is always 'activities'. Required. Represents a run step with + activities information. + :vartype type: str or ~azure.ai.agents.models.ACTIVITIES + :ivar activities: A list of activities for this run step. Required. + :vartype activities: list[~azure.ai.agents.models.RunStepDetailsActivity] + """ + + type: Literal[RunStepType.ACTIVITIES] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'activities'. Required. Represents a run step with activities + information.""" + activities: List["_models.RunStepDetailsActivity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of activities for this run step. Required.""" + + @overload + def __init__( + self, + *, + activities: List["_models.RunStepDetailsActivity"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.ACTIVITIES, **kwargs) + + class RunStepToolCall(_Model): """An abstract representation of a detailed tool call as recorded within a run step for an existing run. @@ -5460,7 +5648,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class RunStepDeltaMcpToolCall(RunStepDeltaToolCall, discriminator="mcp"): - """Represents the function data in a streaming run step MCP call.*. + """Represents the function data in a streaming run step MCP call. :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. :vartype id: str @@ -5668,25 +5856,39 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="tool_calls", **kwargs) -class RunStepDetails(_Model): - """An abstract representation of the details for a run step. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepMessageCreationDetails, RunStepToolCallDetails +class RunStepDetailsActivity(_Model): + """Represents the list of activities, associated with the given step. - :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls". - :vartype type: str or ~azure.ai.agents.models.RunStepType + :ivar type: The activity type, which is always 'mcp_list_tools'. Required. Default value is + "mcp_list_tools". + :vartype type: str + :ivar id: The activity ID. Required. + :vartype id: str + :ivar server_label: Server label. Required. + :vartype server_label: str + :ivar tools: The supported function list. Required. + :vartype tools: dict[str, ~azure.ai.agents.models.ActivityFunctionDefinition] """ - __mapping__: Dict[str, _Model] = {} - type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) - """The object type. Required. 
Known values are: \"message_creation\" and \"tool_calls\".""" + type: Literal["mcp_list_tools"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The activity type, which is always 'mcp_list_tools'. Required. Default value is + \"mcp_list_tools\".""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The activity ID. Required.""" + server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Server label. Required.""" + tools: Dict[str, "_models.ActivityFunctionDefinition"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The supported function list. Required.""" @overload def __init__( self, *, - type: str, + id: str, # pylint: disable=redefined-builtin + server_label: str, + tools: Dict[str, "_models.ActivityFunctionDefinition"], ) -> None: ... @overload @@ -5698,6 +5900,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) + self.type: Literal["mcp_list_tools"] = "mcp_list_tools" class RunStepError(_Model): diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py index 5b2f46f1423f..b70f9c497553 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/operations/_operations.py @@ -1368,7 +1368,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1455,7 +1455,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -1510,7 +1510,7 @@ def get(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1674,7 +1674,7 @@ def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1735,7 +1735,7 @@ def _delete_thread(self, thread_id: str, **kwargs: Any) -> _models._models.Threa except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -1943,7 +1943,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2095,7 +2095,7 @@ def get(self, thread_id: str, message_id: str, 
**kwargs: Any) -> _models.ThreadM except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2256,7 +2256,7 @@ def update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2649,7 +2649,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2740,7 +2740,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -2798,7 +2798,7 @@ def get(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -2959,7 +2959,7 @@ def 
update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3132,7 +3132,7 @@ def submit_tool_outputs( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3196,7 +3196,7 @@ def cancel(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRu except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3295,7 +3295,7 @@ def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3397,7 +3397,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3473,7 +3473,7 @@ def list( except 
(StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3545,7 +3545,7 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3606,7 +3606,7 @@ def _delete_file(self, file_id: str, **kwargs: Any) -> _models._models.FileDelet except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3669,7 +3669,7 @@ def get(self, file_id: str, **kwargs: Any) -> _models.FileInfo: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -3730,7 +3730,7 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = 
_failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) deserialized = response.iter_bytes() @@ -3832,7 +3832,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4004,7 +4004,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4065,7 +4065,7 @@ def get(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4227,7 +4227,7 @@ def modify( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4289,7 +4289,7 @@ def _delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models._ except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
_failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4405,7 +4405,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4557,7 +4557,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4621,7 +4621,7 @@ def get(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.Vect except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4689,7 +4689,7 @@ def _delete_vector_store_file( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4867,7 +4867,7 @@ def create( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
_failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4931,7 +4931,7 @@ def get(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models.Vec except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -4996,7 +4996,7 @@ def cancel(self, vector_store_id: str, batch_id: str, **kwargs: Any) -> _models. except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -5096,7 +5096,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -5325,7 +5325,7 @@ def create_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -5412,7 +5412,7 @@ def get_next(_continuation_token=None): if response.status_code not in [200]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -5467,7 +5467,7 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -5714,7 +5714,7 @@ def update_agent( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -5775,7 +5775,7 @@ def _delete_agent(self, agent_id: str, **kwargs: Any) -> _models._models.AgentDe except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: @@ -6095,7 +6095,7 @@ def create_thread_and_run( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.AgentV1Error, response.json()) + error = _failsafe_deserialize(_models.AgentV1Error, response) raise HttpResponseError(response=response, model=error) if _stream: diff --git 
a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py index bbd54d09696d..e5bb57d0ff22 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py @@ -86,10 +86,9 @@ async def main(): if msg.text_messages: last_text = msg.text_messages[-1].text.value for annotation in msg.text_messages[-1].text.annotations: - - citation = ( - vector_store_files.get( - annotation.file_citation.file_id, annotation.file_citation.file_id) + + citation = vector_store_files.get( + annotation.file_citation.file_id, annotation.file_citation.file_id ) last_text = last_text.replace(annotation.text, f" [{citation}]") print(f"{msg.role}: {last_text}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py index 35f15babecca..02850212ef32 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py +++ b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py @@ -16,7 +16,7 @@ Before running the sample: - pip install azure-ai-projects azure-ai-agents azure-identity + pip install azure-ai-projects "azure-ai-agents>=1.2.0b3" azure-identity Set these environment variables with your own values: 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview @@ -40,6 +40,7 @@ MessageDeltaTextContent, MessageDeltaTextUrlCitationAnnotation, RequiredMcpToolCall, + RunStepActivityDetails, SubmitToolApprovalAction, ToolApproval, ) @@ -83,14 +84,18 @@ print(f"Created thread, thread ID {thread.id}") message = agents_client.messages.create( 
role=MessageRole.USER, content="Please summarize the Azure REST API specifications Readme" + thread_id=thread.id, + role=MessageRole.USER, + content="Please summarize the Azure REST API specifications Readme", ) print(f"Created message, message ID {message.id}") # Process Agent run and stream events back to the client. It may take a few minutes for the agent to complete the run. mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) as stream: + with agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources + ) as stream: for event_type, event_data, _ in stream: @@ -155,6 +160,33 @@ elif isinstance(event_data, RunStep): print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + # Check if there are tool calls in the step details + step_details = event_data.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" MCP Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + if isinstance(step_details, RunStepActivityDetails): + for activity in step_details.activities: + for function_name, function_definition in activity.tools.items(): + print( + f' The function {function_name} with description "{function_definition.description}" will be called.:' + ) + if len(function_definition.parameters) > 0: + print(" Function parameters:") + for argument, func_argument in function_definition.parameters.properties.items(): + print(f" {argument}") + print(f" Type: {func_argument.type}") + print(f" Description: {func_argument.description}") + else: + print("This function has no parameters") + + print() # add an extra newline between steps + elif event_type == AgentStreamEvent.ERROR: print(f"An error occurred. 
Data: {event_data}") @@ -169,7 +201,9 @@ agents_client.delete_agent(agent.id) print("Deleted agent") - response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) + response_message = agents_client.messages.get_last_message_by_role( + thread_id=thread.id, role=MessageRole.AGENT + ) if response_message: for text_message in response_message.text_messages: print(f"Agent response: {text_message.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py index 798db0bedf4b..6e75f59cc017 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py @@ -101,10 +101,7 @@ if msg.text_messages: last_text = msg.text_messages[-1].text.value for annotation in msg.text_messages[-1].text.annotations: - - citation = ( - vector_store_files.get( - annotation.file_citation.file_id, annotation.file_citation.file_id) - ) + + citation = vector_store_files.get(annotation.file_citation.file_id, annotation.file_citation.file_id) last_text = last_text.replace(annotation.text, f" [{citation}]") print(f"{msg.role}: {last_text}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py index 10893dec8099..bbaa61d58212 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure-ai-projects azure-ai-agents azure-identity --pre + pip install azure-ai-projects azure-ai-agents>=1.2.0b3 azure-identity --pre Set these environment variables with your own values: 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview @@ -29,7 +29,14 
@@ import os, time from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import McpTool, RequiredMcpToolCall, SubmitToolApprovalAction, ToolApproval +from azure.ai.agents.models import ( + ListSortOrder, + McpTool, + RequiredMcpToolCall, + RunStepActivityDetails, + SubmitToolApprovalAction, + ToolApproval, +) # Get MCP server configuration from environment variables mcp_server_url = os.environ.get("MCP_SERVER_URL", "https://gitmcp.io/Azure/azure-rest-api-specs") @@ -86,7 +93,9 @@ # Create and process agent run in thread with MCP tools mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement - run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) + run = agents_client.runs.create( + thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources + ) print(f"Created run, ID: {run.id}") while run.status in ["queued", "in_progress", "requires_action"]: @@ -145,10 +154,25 @@ print(f" Tool Call ID: {call.get('id')}") print(f" Type: {call.get('type')}") + if isinstance(step_details, RunStepActivityDetails): + for activity in step_details.activities: + for function_name, function_definition in activity.tools.items(): + print( + f' The function {function_name} with description "{function_definition.description}" will be called.:' + ) + if len(function_definition.parameters) > 0: + print(" Function parameters:") + for argument, func_argument in function_definition.parameters.properties.items(): + print(f" {argument}") + print(f" Type: {func_argument.type}") + print(f" Description: {func_argument.description}") + else: + print("This function has no parameters") + print() # add an extra newline between steps # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) + messages = agents_client.messages.list(thread_id=thread.id, 
order=ListSortOrder.ASCENDING) print("\nConversation:") print("-" * 50) for msg in messages: diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py index 87a97e6f2074..7e09b1504962 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py @@ -157,8 +157,7 @@ for tool_call in run_step.step_details.tool_calls: if isinstance(tool_call, RunStepConnectedAgentToolCall): print( - f"\tAgent: {tool_call.connected_agent.name} " - f"query: {tool_call.connected_agent.arguments} ", + f"\tAgent: {tool_call.connected_agent.name} " f"query: {tool_call.connected_agent.arguments} ", f"output: {tool_call.connected_agent.output}", ) # [END list_tool_calls] diff --git a/sdk/ai/azure-ai-agents/tests/conftest.py b/sdk/ai/azure-ai-agents/tests/conftest.py index 9b1e37bc06d7..dfc7b135408a 100644 --- a/sdk/ai/azure-ai-agents/tests/conftest.py +++ b/sdk/ai/azure-ai-agents/tests/conftest.py @@ -165,7 +165,6 @@ def azure_workspace_triad_sanitizer(): value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.CognitiveServices/accounts/00000/projects/00000/connections/00000", ) - # Sanitize API key from service response (/tests/connections) add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized") diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client.py b/sdk/ai/azure-ai-agents/tests/test_agents_client.py index ba46248000f9..d8b652dc5c36 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client.py @@ -86,7 +86,7 @@ TestAgentClientBase, agentClientPreparer, fetch_current_datetime_recordings, - fetch_current_datetime_live + fetch_current_datetime_live, ) # Statically defined user functions for fast 
reference @@ -2125,7 +2125,8 @@ def test_file_search_add_vector_store(self, **kwargs): print(f"Uploaded file, file ID: {openai_file.id}") openai_vectorstore = client.vector_stores.create_and_poll( - file_ids=[openai_file.id], name="my_vectorstore", polling_interval=self._sleep_time()) + file_ids=[openai_file.id], name="my_vectorstore", polling_interval=self._sleep_time() + ) print(f"Created vector store, vector store ID: {openai_vectorstore.id}") file_search.add_vector_store(openai_vectorstore.id) @@ -2256,8 +2257,7 @@ def _do_test_create_vector_store(self, streaming, **kwargs): ) ] vector_store = ai_client.vector_stores.create_and_poll( - file_ids=file_ids, data_sources=ds, name="my_vectorstore", - polling_interval=self._sleep_time() + file_ids=file_ids, data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time() ) assert vector_store.id self._test_file_search(ai_client, vector_store, file_id, streaming) @@ -2350,8 +2350,9 @@ def _do_test_create_vector_store_add_file(self, streaming, **kwargs): asset_identifier=kwargs["azure_ai_agents_tests_data_path"], asset_type="uri_asset", ) - vector_store = ai_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store", - polling_interval=self._sleep_time()) + vector_store = ai_client.vector_stores.create_and_poll( + file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time() + ) assert vector_store.id vector_store_file = ai_client.vector_store_files.create( vector_store_id=vector_store.id, data_source=ds, file_id=file_id @@ -2404,12 +2405,12 @@ def _do_test_create_vector_store_batch(self, streaming, **kwargs): asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store", - polling_interval=self._sleep_time()) + vector_store = ai_client.vector_stores.create_and_poll( + file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time() + ) assert vector_store.id 
vector_store_file_batch = ai_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids, - polling_interval=self._sleep_time() + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids, polling_interval=self._sleep_time() ) assert vector_store_file_batch.id self._test_file_search(ai_client, vector_store, file_id, streaming) @@ -2780,7 +2781,7 @@ def test_azure_ai_search_tool(self, **kwargs): """Test using the AzureAISearchTool with an agent.""" azure_search_tool = self._get_azure_ai_search_tool(**kwargs) with self.create_client(by_endpoint=True, **kwargs) as client: - assert isinstance(client, AgentsClient) + assert isinstance(client, AgentsClient) self._do_test_tool( client=client, @@ -2793,7 +2794,7 @@ def test_azure_ai_search_tool(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", - ) + ), ) @agentClientPreparer() @@ -2813,7 +2814,7 @@ def test_azure_ai_search_tool_streaming(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", - ) + ), ) @agentClientPreparer() @@ -2840,7 +2841,8 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw ) ] vector_store = ai_client.vector_stores.create_and_poll( - file_ids=[], data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time()) + file_ids=[], data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time() + ) # vector_store = await ai_client.vector_stores.get('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id @@ -3280,8 +3282,7 @@ def _get_file_search_tool_and_file_id(self, client, **kwargs): ) ] vector_store = client.vector_stores.create_and_poll( - data_sources=ds, name="my_vectorstore", - polling_interval=self._sleep_time() + data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time() ) file_id = None for fle in 
client.vector_store_files.list(vector_store.id): @@ -3312,8 +3313,8 @@ def test_file_search_tool(self, **kwargs): text="test", file_citation=MessageTextFileCitationDetails( file_id=file_id, - ) - ) + ), + ), ) finally: client.vector_stores.delete(file_search_tool.resources.file_search.vector_store_ids[0]) @@ -3340,21 +3341,23 @@ def test_file_search_tool_streaming(self, **kwargs): text="test", file_citation=MessageTextFileCitationDetails( file_id=file_id, - ) - ) + ), + ), ) finally: client.vector_stores.delete(file_search_tool.resources.file_search.vector_store_ids[0]) def _get_open_api_tool(self): """Helper method to get the openAPI tool.""" - weather_asset_file_path = os.path.join( - os.path.dirname(__file__), "assets", "weather_openapi.json") + weather_asset_file_path = os.path.join(os.path.dirname(__file__), "assets", "weather_openapi.json") auth = OpenApiAnonymousAuthDetails() with open(weather_asset_file_path, "r") as f: openapi_weather = jsonref.load(f) return OpenApiTool( - name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth + name="get_weather", + spec=openapi_weather, + description="Retrieve weather information for a location", + auth=auth, ) @agentClientPreparer() @@ -3397,7 +3400,7 @@ def test_bing_grounding_tool(self, **kwargs): """Test Bing grounding tool call in non-streaming Scenario.""" with self.create_client(**kwargs, by_endpoint=True) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get('azure_ai_agents_tests_bing_connection_id')) + openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) self._do_test_tool( client=client, @@ -3409,7 +3412,7 @@ def test_bing_grounding_tool(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="*", title="*", - ) + ), ) @agentClientPreparer() @@ -3418,7 +3421,7 @@ def test_bing_grounding_tool_streaming(self, **kwargs): """Test Bing grounding tool call in 
streaming Scenario.""" with self.create_client(**kwargs, by_endpoint=True) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get('azure_ai_agents_tests_bing_connection_id')) + openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) self._do_test_tool_streaming( client=client, @@ -3430,7 +3433,7 @@ def test_bing_grounding_tool_streaming(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="*", title="*", - ) + ), ) def _do_test_tool( @@ -3447,7 +3450,8 @@ def _do_test_tool( minimal_text_length=1, uri_annotation=None, file_annotation=None, - **kwargs): + **kwargs, + ): """ The helper method to test the non-interactive tools in the non-streaming scenarios. @@ -3513,7 +3517,7 @@ def _do_test_tool( text = "\n".join([t.text.value.lower() for t in text_messages]) if specific_message_text: assert specific_message_text in text, f"{specific_message_text} was not found in {text}." - + # Search for the specific URL and title in the message annotation. if uri_annotation is not None: has_annotation = False @@ -3522,14 +3526,14 @@ def _do_test_tool( if has_annotation: break assert has_annotation, f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." - + # Search for the file annotation. if file_annotation: has_annotation = False for message in agent_messages: has_annotation = self._has_file_annotation(message, file_annotation) if has_annotation: - break + break assert has_annotation, f"The annotation {file_annotation} was not found." if expected_class is not None: @@ -3610,11 +3614,15 @@ def _do_test_tool_streaming( if event_data.role == MessageRole.AGENT: # Search for the specific URL and title in the message annotation. 
if not has_uri_annotation: - has_uri_annotation = has_uri_annotation or self._has_url_annotation(event_data, uri_annotation) - + has_uri_annotation = has_uri_annotation or self._has_url_annotation( + event_data, uri_annotation + ) + # Search for the file annotation. if not has_file_annotation: - has_file_annotation = has_file_annotation or self._has_file_annotation(event_data, file_annotation) + has_file_annotation = has_file_annotation or self._has_file_annotation( + event_data, file_annotation + ) elif isinstance(event_data, RunStepDeltaChunk): if expected_delta_class is not None: @@ -3646,7 +3654,9 @@ def _do_test_tool_streaming( assert is_completed, "The stream was not completed." assert is_run_step_created, "No run steps were created." - assert has_uri_annotation, f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." + assert ( + has_uri_annotation + ), f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." assert has_file_annotation, f"The annotation {file_annotation} was not found." 
# Assertions on messages messages = list(client.messages.list(thread_id=thread.id)) diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py b/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py index ce0dea9945e8..a7604629bbd2 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py @@ -78,7 +78,7 @@ TestAgentClientBase, agentClientPreparer, fetch_current_datetime_recordings, - fetch_current_datetime_live + fetch_current_datetime_live, ) # TODO clean this up / get rid of anything not in use @@ -2150,8 +2150,7 @@ async def _do_test_create_vector_store(self, streaming, **kwargs): ) ] vector_store = await ai_client.vector_stores.create_and_poll( - file_ids=file_ids, data_sources=ds, name="my_vectorstore", - polling_interval=self._sleep_time() + file_ids=file_ids, data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time() ) assert vector_store.id await self._test_file_search(ai_client, vector_store, file_id, streaming) @@ -2198,7 +2197,8 @@ async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) vector_store = await ai_client.vector_stores.create_and_poll( - file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time()) + file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time() + ) assert vector_store.id vector_store_file = await ai_client.vector_store_files.create( vector_store_id=vector_store.id, data_source=ds, file_id=file_id @@ -2252,11 +2252,11 @@ async def _do_test_create_vector_store_batch(self, streaming, **kwargs): ) ] vector_store = await ai_client.vector_stores.create_and_poll( - file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time()) + file_ids=[], name="sample_vector_store", polling_interval=self._sleep_time() + ) assert vector_store.id vector_store_file_batch = await 
ai_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids, - polling_interval=self._sleep_time() + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids, polling_interval=self._sleep_time() ) assert vector_store_file_batch.id await self._test_file_search(ai_client, vector_store, file_id, streaming) @@ -2778,8 +2778,7 @@ async def _do_test_include_file_search_results(self, use_stream, include_content ) ] vector_store = await ai_client.vector_stores.create_and_poll( - file_ids=[], data_sources=ds, name="my_vectorstore", - polling_interval=self._sleep_time() + file_ids=[], data_sources=ds, name="my_vectorstore", polling_interval=self._sleep_time() ) # vector_store = await ai_client.vector_stores.get('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id @@ -2957,7 +2956,7 @@ async def test_azure_ai_search_tool(self, **kwargs): """Test using the AzureAISearchTool with an agent.""" azure_search_tool = self._get_azure_ai_search_tool(**kwargs) async with self.create_client(by_endpoint=True, **kwargs) as client: - assert isinstance(client, AgentsClient) + assert isinstance(client, AgentsClient) await self._do_test_tool( client=client, @@ -2970,7 +2969,7 @@ async def test_azure_ai_search_tool(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", - ) + ), ) @agentClientPreparer() @@ -2990,7 +2989,7 @@ async def test_azure_ai_search_tool_streaming(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", - ) + ), ) @agentClientPreparer() @@ -3093,7 +3092,7 @@ async def test_connected_agent_tool(self, **kwargs): ) finally: await client.delete_agent(connected_agent.connected_agent.id) - + async def _get_file_search_tool_and_file_id(self, client, **kwargs): """Helper method to get the file search tool.""" ds = [ @@ -3134,8 +3133,8 @@ async def test_file_search_tool(self, **kwargs): 
text="test", file_citation=MessageTextFileCitationDetails( file_id=file_id, - ) - ) + ), + ), ) finally: await client.vector_stores.delete(file_search_tool.resources.file_search.vector_store_ids[0]) @@ -3162,21 +3161,23 @@ async def test_file_search_tool_streaming(self, **kwargs): text="test", file_citation=MessageTextFileCitationDetails( file_id=file_id, - ) - ) + ), + ), ) finally: await client.vector_stores.delete(file_search_tool.resources.file_search.vector_store_ids[0]) def _get_open_api_tool(self): """Helper method to get the openAPI tool.""" - weather_asset_file_path = os.path.join( - os.path.dirname(__file__), "assets", "weather_openapi.json") + weather_asset_file_path = os.path.join(os.path.dirname(__file__), "assets", "weather_openapi.json") auth = OpenApiAnonymousAuthDetails() with open(weather_asset_file_path, "r") as f: openapi_weather = jsonref.load(f) return OpenApiTool( - name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth + name="get_weather", + spec=openapi_weather, + description="Retrieve weather information for a location", + auth=auth, ) @agentClientPreparer() @@ -3219,7 +3220,7 @@ async def test_bing_grounding_tool(self, **kwargs): """Test Bing grounding tool call in non-streaming Scenario.""" async with self.create_client(by_endpoint=True, **kwargs) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get('azure_ai_agents_tests_bing_connection_id')) + openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) await self._do_test_tool( client=client, @@ -3231,7 +3232,7 @@ async def test_bing_grounding_tool(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="*", title="*", - ) + ), ) @agentClientPreparer() @@ -3240,7 +3241,7 @@ async def test_bing_grounding_tool_streaming(self, **kwargs): """Test Bing grounding tool call in streaming Scenario.""" async with 
self.create_client(by_endpoint=True, **kwargs) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get('azure_ai_agents_tests_bing_connection_id')) + openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) await self._do_test_tool_streaming( client=client, @@ -3252,7 +3253,7 @@ async def test_bing_grounding_tool_streaming(self, **kwargs): uri_annotation=MessageTextUrlCitationDetails( url="*", title="*", - ) + ), ) async def _do_test_tool( @@ -3345,7 +3346,7 @@ async def _do_test_tool( if has_annotation: break assert has_annotation, f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." - + # Search for the file annotation. if file_annotation: has_annotation = False @@ -3427,16 +3428,20 @@ async def _do_test_tool_streaming( if isinstance(event_data, MessageDeltaChunk): received_message = True - + elif isinstance(event_data, ThreadMessage): if event_data.role == MessageRole.AGENT: # Search for the specific URL and title in the message annotation. if not has_uri_annotation: - has_uri_annotation = has_uri_annotation or self._has_url_annotation(event_data, uri_annotation) - + has_uri_annotation = has_uri_annotation or self._has_url_annotation( + event_data, uri_annotation + ) + # Search for the file annotation. if not has_file_annotation: - has_file_annotation = has_file_annotation or self._has_file_annotation(event_data, file_annotation) + has_file_annotation = has_file_annotation or self._has_file_annotation( + event_data, file_annotation + ) elif isinstance(event_data, RunStepDeltaChunk): if expected_delta_class is not None: @@ -3467,8 +3472,10 @@ async def _do_test_tool_streaming( assert got_expected_delta, f"The delta tool call of type {expected_delta_class} was not found." assert is_completed, "The stream was not completed." assert is_run_step_created, "No run steps were created." 
- - assert has_uri_annotation, f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." + + assert ( + has_uri_annotation + ), f"The annotation [{uri_annotation.title}]({uri_annotation.url}) was not found." assert has_file_annotation, f"The annotation {file_annotation} was not found." messages = [message async for message in client.messages.list(thread_id=thread.id)] assert len(messages) > 1 diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py b/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py index 7c972b498c2c..d77fb9a28271 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression import sys import logging import functools @@ -79,11 +80,7 @@ def _sleep_time(cls, sleep: int = 1) -> int: return sleep if is_live() else 0 @classmethod - def _has_url_annotation( - cls, - message: ThreadMessage, - uri_annotation: MessageTextUrlCitationDetails - ) -> bool: + def _has_url_annotation(cls, message: ThreadMessage, uri_annotation: MessageTextUrlCitationDetails) -> bool: """ Return True if the message contains required URL annotation. 
@@ -95,17 +92,17 @@ def _has_url_annotation( url_annotations = message.url_citation_annotations if url_annotations: for url in url_annotations: - if ((uri_annotation.url == '*' and url.url_citation.url) or url.url_citation.url == uri_annotation.url) and\ - ((uri_annotation.title == '*' and url.url_citation.title) or url.url_citation.title == uri_annotation.title): + if ( + (uri_annotation.url == "*" and url.url_citation.url) or url.url_citation.url == uri_annotation.url + ) and ( + (uri_annotation.title == "*" and url.url_citation.title) + or url.url_citation.title == uri_annotation.title + ): return True return False @classmethod - def _has_file_annotation( - cls, - message: ThreadMessage, - file_annotation: MessageTextFileCitationDetails - ) -> bool: + def _has_file_annotation(cls, message: ThreadMessage, file_annotation: MessageTextFileCitationDetails) -> bool: """ Return True if the message contains required file annotation @@ -129,4 +126,4 @@ def _validate_run_step_browser_automation_tool_call(cls, tool_call: RunStepBrows assert len(tool_call.browser_automation.steps) > 1 assert tool_call.browser_automation.steps[0].last_step_result assert tool_call.browser_automation.steps[0].current_state - assert tool_call.browser_automation.steps[0].next_step \ No newline at end of file + assert tool_call.browser_automation.steps[0].next_step diff --git a/sdk/ai/azure-ai-agents/tsp-location.yaml b/sdk/ai/azure-ai-agents/tsp-location.yaml index b88118a192cc..f00316254952 100644 --- a/sdk/ai/azure-ai-agents/tsp-location.yaml +++ b/sdk/ai/azure-ai-agents/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Agents -commit: 511fb938a3f30e21347f0dd27e63f78e3ce42e5e +commit: 5470a13587ba49e4fb0980073ca6ba468ef837c3 repo: Azure/azure-rest-api-specs additionalDirectories: