diff --git a/sdk/ai/azure-ai-projects/_meta.json b/sdk/ai/azure-ai-projects/_meta.json new file mode 100644 index 000000000000..a44cebf1dfae --- /dev/null +++ b/sdk/ai/azure-ai-projects/_meta.json @@ -0,0 +1,6 @@ +{ + "commit": "799efd99712203ab387f69ec3d55378d36bc62ea", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/ai/Azure.AI.Projects", + "@azure-tools/typespec-python": "0.38.4" +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/apiview-properties.json b/sdk/ai/azure-ai-projects/apiview-properties.json new file mode 100644 index 000000000000..717fcc10068b --- /dev/null +++ b/sdk/ai/azure-ai-projects/apiview-properties.json @@ -0,0 +1,253 @@ +{ + "CrossLanguagePackageId": "Azure.AI.Projects", + "CrossLanguageDefinitionId": { + "azure.ai.projects.models.Agent": "Azure.AI.Projects.Agents.Agent", + "azure.ai.projects.models.AgentDeletionStatus": "Azure.AI.Projects.Agents.AgentDeletionStatus", + "azure.ai.projects.models.AgentsApiResponseFormat": "Azure.AI.Projects.Agents.AgentsApiResponseFormat", + "azure.ai.projects.models.AgentsNamedToolChoice": "Azure.AI.Projects.Agents.AgentsNamedToolChoice", + "azure.ai.projects.models.AgentThread": "Azure.AI.Projects.Agents.AgentThread", + "azure.ai.projects.models.AgentThreadCreationOptions": "Azure.AI.Projects.Agents.AgentThreadCreationOptions", + "azure.ai.projects.models.InputData": "Azure.AI.Projects.InputData", + "azure.ai.projects.models.ApplicationInsightsConfiguration": "Azure.AI.Projects.ApplicationInsightsConfiguration", + "azure.ai.projects.models.AzureAISearchResource": "Azure.AI.Projects.Agents.AzureAISearchResource", + "azure.ai.projects.models.ToolDefinition": "Azure.AI.Projects.Agents.ToolDefinition", + "azure.ai.projects.models.AzureAISearchToolDefinition": "Azure.AI.Projects.Agents.AzureAISearchToolDefinition", + "azure.ai.projects.models.AzureFunctionBinding": "Azure.AI.Projects.Agents.AzureFunctionBinding", + 
"azure.ai.projects.models.AzureFunctionDefinition": "Azure.AI.Projects.Agents.AzureFunctionDefinition", + "azure.ai.projects.models.AzureFunctionStorageQueue": "Azure.AI.Projects.Agents.AzureFunctionStorageQueue", + "azure.ai.projects.models.AzureFunctionToolDefinition": "Azure.AI.Projects.Agents.AzureFunctionToolDefinition", + "azure.ai.projects.models.BingGroundingToolDefinition": "Azure.AI.Projects.Agents.BingGroundingToolDefinition", + "azure.ai.projects.models.CodeInterpreterToolDefinition": "Azure.AI.Projects.Agents.CodeInterpreterToolDefinition", + "azure.ai.projects.models.CodeInterpreterToolResource": "Azure.AI.Projects.Agents.CodeInterpreterToolResource", + "azure.ai.projects.models.Trigger": "Azure.AI.Projects.Trigger", + "azure.ai.projects.models.CronTrigger": "Azure.AI.Projects.CronTrigger", + "azure.ai.projects.models.Dataset": "Azure.AI.Projects.Dataset", + "azure.ai.projects.models.Evaluation": "Azure.AI.Projects.Evaluation", + "azure.ai.projects.models.EvaluationSchedule": "Azure.AI.Projects.EvaluationSchedule", + "azure.ai.projects.models.EvaluatorConfiguration": "Azure.AI.Projects.EvaluatorConfiguration", + "azure.ai.projects.models.FileDeletionStatus": "Azure.AI.Projects.Agents.FileDeletionStatus", + "azure.ai.projects.models.FileListResponse": "Azure.AI.Projects.Agents.FileListResponse", + "azure.ai.projects.models.FileSearchRankingOptions": "Azure.AI.Projects.Agents.FileSearchRankingOptions", + "azure.ai.projects.models.FileSearchToolCallContent": "Azure.AI.Projects.Agents.FileSearchToolCallContent", + "azure.ai.projects.models.FileSearchToolDefinition": "Azure.AI.Projects.Agents.FileSearchToolDefinition", + "azure.ai.projects.models.FileSearchToolDefinitionDetails": "Azure.AI.Projects.Agents.FileSearchToolDefinitionDetails", + "azure.ai.projects.models.FileSearchToolResource": "Azure.AI.Projects.Agents.FileSearchToolResource", + "azure.ai.projects.models.FunctionDefinition": "Azure.AI.Projects.Agents.FunctionDefinition", + 
"azure.ai.projects.models.FunctionName": "Azure.AI.Projects.Agents.FunctionName", + "azure.ai.projects.models.FunctionToolDefinition": "Azure.AI.Projects.Agents.FunctionToolDefinition", + "azure.ai.projects.models.IncompleteRunDetails": "Azure.AI.Projects.Agents.IncompleteRunDetails", + "azure.ai.projects.models.IndexResource": "Azure.AI.Projects.Agents.IndexResource", + "azure.ai.projects.models.MessageAttachment": "Azure.AI.Projects.Agents.MessageAttachment", + "azure.ai.projects.models.MessageContent": "Azure.AI.Projects.Agents.MessageContent", + "azure.ai.projects.models.MessageDelta": "Azure.AI.Projects.Agents.MessageDelta", + "azure.ai.projects.models.MessageDeltaChunk": "Azure.AI.Projects.Agents.MessageDeltaChunk", + "azure.ai.projects.models.MessageDeltaContent": "Azure.AI.Projects.Agents.MessageDeltaContent", + "azure.ai.projects.models.MessageDeltaImageFileContent": "Azure.AI.Projects.Agents.MessageDeltaImageFileContent", + "azure.ai.projects.models.MessageDeltaImageFileContentObject": "Azure.AI.Projects.Agents.MessageDeltaImageFileContentObject", + "azure.ai.projects.models.MessageDeltaTextAnnotation": "Azure.AI.Projects.Agents.MessageDeltaTextAnnotation", + "azure.ai.projects.models.MessageDeltaTextContent": "Azure.AI.Projects.Agents.MessageDeltaTextContent", + "azure.ai.projects.models.MessageDeltaTextContentObject": "Azure.AI.Projects.Agents.MessageDeltaTextContentObject", + "azure.ai.projects.models.MessageDeltaTextFileCitationAnnotation": "Azure.AI.Projects.Agents.MessageDeltaTextFileCitationAnnotation", + "azure.ai.projects.models.MessageDeltaTextFileCitationAnnotationObject": "Azure.AI.Projects.Agents.MessageDeltaTextFileCitationAnnotationObject", + "azure.ai.projects.models.MessageDeltaTextFilePathAnnotation": "Azure.AI.Projects.Agents.MessageDeltaTextFilePathAnnotation", + "azure.ai.projects.models.MessageDeltaTextFilePathAnnotationObject": "Azure.AI.Projects.Agents.MessageDeltaTextFilePathAnnotationObject", + 
"azure.ai.projects.models.MessageDeltaTextUrlCitationAnnotation": "Azure.AI.Projects.Agents.MessageDeltaTextUrlCitationAnnotation", + "azure.ai.projects.models.MessageDeltaTextUrlCitationDetails": "Azure.AI.Projects.Agents.MessageDeltaTextUrlCitationDetails", + "azure.ai.projects.models.MessageImageFileContent": "Azure.AI.Projects.Agents.MessageImageFileContent", + "azure.ai.projects.models.MessageImageFileDetails": "Azure.AI.Projects.Agents.MessageImageFileDetails", + "azure.ai.projects.models.MessageIncompleteDetails": "Azure.AI.Projects.Agents.MessageIncompleteDetails", + "azure.ai.projects.models.MessageTextAnnotation": "Azure.AI.Projects.Agents.MessageTextAnnotation", + "azure.ai.projects.models.MessageTextContent": "Azure.AI.Projects.Agents.MessageTextContent", + "azure.ai.projects.models.MessageTextDetails": "Azure.AI.Projects.Agents.MessageTextDetails", + "azure.ai.projects.models.MessageTextFileCitationAnnotation": "Azure.AI.Projects.Agents.MessageTextFileCitationAnnotation", + "azure.ai.projects.models.MessageTextFileCitationDetails": "Azure.AI.Projects.Agents.MessageTextFileCitationDetails", + "azure.ai.projects.models.MessageTextFilePathAnnotation": "Azure.AI.Projects.Agents.MessageTextFilePathAnnotation", + "azure.ai.projects.models.MessageTextFilePathDetails": "Azure.AI.Projects.Agents.MessageTextFilePathDetails", + "azure.ai.projects.models.MessageTextUrlCitationAnnotation": "Azure.AI.Projects.Agents.MessageTextUrlCitationAnnotation", + "azure.ai.projects.models.MessageTextUrlCitationDetails": "Azure.AI.Projects.Agents.MessageTextUrlCitationDetails", + "azure.ai.projects.models.MicrosoftFabricToolDefinition": "Azure.AI.Projects.Agents.MicrosoftFabricToolDefinition", + "azure.ai.projects.models.OpenAIFile": "Azure.AI.Projects.Agents.OpenAIFile", + "azure.ai.projects.models.OpenAIPageableListOfAgent": "Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenAIPageableListOfRunStep": 
"Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenAIPageableListOfThreadMessage": "Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenAIPageableListOfThreadRun": "Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenAIPageableListOfVectorStore": "Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenAIPageableListOfVectorStoreFile": "Azure.AI.Projects.Agents.OpenAIPageableListOf", + "azure.ai.projects.models.OpenApiAuthDetails": "Azure.AI.Projects.Agents.OpenApiAuthDetails", + "azure.ai.projects.models.OpenApiAnonymousAuthDetails": "Azure.AI.Projects.Agents.OpenApiAnonymousAuthDetails", + "azure.ai.projects.models.OpenApiConnectionAuthDetails": "Azure.AI.Projects.Agents.OpenApiConnectionAuthDetails", + "azure.ai.projects.models.OpenApiConnectionSecurityScheme": "Azure.AI.Projects.Agents.OpenApiConnectionSecurityScheme", + "azure.ai.projects.models.OpenApiFunctionDefinition": "Azure.AI.Projects.Agents.OpenApiFunctionDefinition", + "azure.ai.projects.models.OpenApiManagedAuthDetails": "Azure.AI.Projects.Agents.OpenApiManagedAuthDetails", + "azure.ai.projects.models.OpenApiManagedSecurityScheme": "Azure.AI.Projects.Agents.OpenApiManagedSecurityScheme", + "azure.ai.projects.models.OpenApiToolDefinition": "Azure.AI.Projects.Agents.OpenApiToolDefinition", + "azure.ai.projects.models.RecurrenceSchedule": "Azure.AI.Projects.RecurrenceSchedule", + "azure.ai.projects.models.RecurrenceTrigger": "Azure.AI.Projects.RecurrenceTrigger", + "azure.ai.projects.models.RequiredAction": "Azure.AI.Projects.Agents.RequiredAction", + "azure.ai.projects.models.RequiredToolCall": "Azure.AI.Projects.Agents.RequiredToolCall", + "azure.ai.projects.models.RequiredFunctionToolCall": "Azure.AI.Projects.Agents.RequiredFunctionToolCall", + "azure.ai.projects.models.RequiredFunctionToolCallDetails": "Azure.AI.Projects.Agents.RequiredFunctionToolCallDetails", + 
"azure.ai.projects.models.ResponseFormatJsonSchema": "Azure.AI.Projects.Agents.ResponseFormatJsonSchema", + "azure.ai.projects.models.ResponseFormatJsonSchemaType": "Azure.AI.Projects.Agents.ResponseFormatJsonSchemaType", + "azure.ai.projects.models.RunCompletionUsage": "Azure.AI.Projects.Agents.RunCompletionUsage", + "azure.ai.projects.models.RunError": "Azure.AI.Projects.Agents.RunError", + "azure.ai.projects.models.RunStep": "Azure.AI.Projects.Agents.RunStep", + "azure.ai.projects.models.RunStepToolCall": "Azure.AI.Projects.Agents.RunStepToolCall", + "azure.ai.projects.models.RunStepAzureAISearchToolCall": "Azure.AI.Projects.Agents.RunStepAzureAISearchToolCall", + "azure.ai.projects.models.RunStepBingGroundingToolCall": "Azure.AI.Projects.Agents.RunStepBingGroundingToolCall", + "azure.ai.projects.models.RunStepCodeInterpreterToolCallOutput": "Azure.AI.Projects.Agents.RunStepCodeInterpreterToolCallOutput", + "azure.ai.projects.models.RunStepCodeInterpreterImageOutput": "Azure.AI.Projects.Agents.RunStepCodeInterpreterImageOutput", + "azure.ai.projects.models.RunStepCodeInterpreterImageReference": "Azure.AI.Projects.Agents.RunStepCodeInterpreterImageReference", + "azure.ai.projects.models.RunStepCodeInterpreterLogOutput": "Azure.AI.Projects.Agents.RunStepCodeInterpreterLogOutput", + "azure.ai.projects.models.RunStepCodeInterpreterToolCall": "Azure.AI.Projects.Agents.RunStepCodeInterpreterToolCall", + "azure.ai.projects.models.RunStepCodeInterpreterToolCallDetails": "Azure.AI.Projects.Agents.RunStepCodeInterpreterToolCallDetails", + "azure.ai.projects.models.RunStepCompletionUsage": "Azure.AI.Projects.Agents.RunStepCompletionUsage", + "azure.ai.projects.models.RunStepDelta": "Azure.AI.Projects.Agents.RunStepDelta", + "azure.ai.projects.models.RunStepDeltaChunk": "Azure.AI.Projects.Agents.RunStepDeltaChunk", + "azure.ai.projects.models.RunStepDeltaCodeInterpreterDetailItemObject": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterDetailItemObject", + 
"azure.ai.projects.models.RunStepDeltaCodeInterpreterOutput": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterOutput", + "azure.ai.projects.models.RunStepDeltaCodeInterpreterImageOutput": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterImageOutput", + "azure.ai.projects.models.RunStepDeltaCodeInterpreterImageOutputObject": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterImageOutputObject", + "azure.ai.projects.models.RunStepDeltaCodeInterpreterLogOutput": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterLogOutput", + "azure.ai.projects.models.RunStepDeltaToolCall": "Azure.AI.Projects.Agents.RunStepDeltaToolCall", + "azure.ai.projects.models.RunStepDeltaCodeInterpreterToolCall": "Azure.AI.Projects.Agents.RunStepDeltaCodeInterpreterToolCall", + "azure.ai.projects.models.RunStepDeltaDetail": "Azure.AI.Projects.Agents.RunStepDeltaDetail", + "azure.ai.projects.models.RunStepDeltaFileSearchToolCall": "Azure.AI.Projects.Agents.RunStepDeltaFileSearchToolCall", + "azure.ai.projects.models.RunStepDeltaFunction": "Azure.AI.Projects.Agents.RunStepDeltaFunction", + "azure.ai.projects.models.RunStepDeltaFunctionToolCall": "Azure.AI.Projects.Agents.RunStepDeltaFunctionToolCall", + "azure.ai.projects.models.RunStepDeltaMessageCreation": "Azure.AI.Projects.Agents.RunStepDeltaMessageCreation", + "azure.ai.projects.models.RunStepDeltaMessageCreationObject": "Azure.AI.Projects.Agents.RunStepDeltaMessageCreationObject", + "azure.ai.projects.models.RunStepDeltaToolCallObject": "Azure.AI.Projects.Agents.RunStepDeltaToolCallObject", + "azure.ai.projects.models.RunStepDetails": "Azure.AI.Projects.Agents.RunStepDetails", + "azure.ai.projects.models.RunStepError": "Azure.AI.Projects.Agents.RunStepError", + "azure.ai.projects.models.RunStepFileSearchToolCall": "Azure.AI.Projects.Agents.RunStepFileSearchToolCall", + "azure.ai.projects.models.RunStepFileSearchToolCallResult": "Azure.AI.Projects.Agents.RunStepFileSearchToolCallResult", + 
"azure.ai.projects.models.RunStepFileSearchToolCallResults": "Azure.AI.Projects.Agents.RunStepFileSearchToolCallResults", + "azure.ai.projects.models.RunStepFunctionToolCall": "Azure.AI.Projects.Agents.RunStepFunctionToolCall", + "azure.ai.projects.models.RunStepFunctionToolCallDetails": "Azure.AI.Projects.Agents.RunStepFunctionToolCallDetails", + "azure.ai.projects.models.RunStepMessageCreationDetails": "Azure.AI.Projects.Agents.RunStepMessageCreationDetails", + "azure.ai.projects.models.RunStepMessageCreationReference": "Azure.AI.Projects.Agents.RunStepMessageCreationReference", + "azure.ai.projects.models.RunStepMicrosoftFabricToolCall": "Azure.AI.Projects.Agents.RunStepMicrosoftFabricToolCall", + "azure.ai.projects.models.RunStepSharepointToolCall": "Azure.AI.Projects.Agents.RunStepSharepointToolCall", + "azure.ai.projects.models.RunStepToolCallDetails": "Azure.AI.Projects.Agents.RunStepToolCallDetails", + "azure.ai.projects.models.SharepointToolDefinition": "Azure.AI.Projects.Agents.SharepointToolDefinition", + "azure.ai.projects.models.SubmitToolOutputsAction": "Azure.AI.Projects.Agents.SubmitToolOutputsAction", + "azure.ai.projects.models.SubmitToolOutputsDetails": "Azure.AI.Projects.Agents.SubmitToolOutputsDetails", + "azure.ai.projects.models.SystemData": "Azure.AI.Projects.SystemData", + "azure.ai.projects.models.ThreadDeletionStatus": "Azure.AI.Projects.Agents.ThreadDeletionStatus", + "azure.ai.projects.models.ThreadMessage": "Azure.AI.Projects.Agents.ThreadMessage", + "azure.ai.projects.models.ThreadMessageOptions": "Azure.AI.Projects.Agents.ThreadMessageOptions", + "azure.ai.projects.models.ThreadRun": "Azure.AI.Projects.Agents.ThreadRun", + "azure.ai.projects.models.ToolConnection": "Azure.AI.Projects.Agents.ToolConnection", + "azure.ai.projects.models.ToolConnectionList": "Azure.AI.Projects.Agents.ToolConnectionList", + "azure.ai.projects.models.ToolOutput": "Azure.AI.Projects.Agents.ToolOutput", + "azure.ai.projects.models.ToolResources": 
"Azure.AI.Projects.Agents.ToolResources", + "azure.ai.projects.models.TruncationObject": "Azure.AI.Projects.Agents.TruncationObject", + "azure.ai.projects.models.UpdateCodeInterpreterToolResourceOptions": "Azure.AI.Projects.Agents.UpdateCodeInterpreterToolResourceOptions", + "azure.ai.projects.models.UpdateFileSearchToolResourceOptions": "Azure.AI.Projects.Agents.UpdateFileSearchToolResourceOptions", + "azure.ai.projects.models.UpdateToolResourcesOptions": "Azure.AI.Projects.Agents.UpdateToolResourcesOptions", + "azure.ai.projects.models.VectorStore": "Azure.AI.Projects.Agents.VectorStore", + "azure.ai.projects.models.VectorStoreChunkingStrategyRequest": "Azure.AI.Projects.Agents.VectorStoreChunkingStrategyRequest", + "azure.ai.projects.models.VectorStoreAutoChunkingStrategyRequest": "Azure.AI.Projects.Agents.VectorStoreAutoChunkingStrategyRequest", + "azure.ai.projects.models.VectorStoreChunkingStrategyResponse": "Azure.AI.Projects.Agents.VectorStoreChunkingStrategyResponse", + "azure.ai.projects.models.VectorStoreAutoChunkingStrategyResponse": "Azure.AI.Projects.Agents.VectorStoreAutoChunkingStrategyResponse", + "azure.ai.projects.models.VectorStoreConfiguration": "Azure.AI.Projects.Agents.VectorStoreConfiguration", + "azure.ai.projects.models.VectorStoreConfigurations": "Azure.AI.Projects.Agents.VectorStoreConfigurations", + "azure.ai.projects.models.VectorStoreDataSource": "Azure.AI.Projects.Agents.VectorStoreDataSource", + "azure.ai.projects.models.VectorStoreDeletionStatus": "Azure.AI.Projects.Agents.VectorStoreDeletionStatus", + "azure.ai.projects.models.VectorStoreExpirationPolicy": "Azure.AI.Projects.Agents.VectorStoreExpirationPolicy", + "azure.ai.projects.models.VectorStoreFile": "Azure.AI.Projects.Agents.VectorStoreFile", + "azure.ai.projects.models.VectorStoreFileBatch": "Azure.AI.Projects.Agents.VectorStoreFileBatch", + "azure.ai.projects.models.VectorStoreFileCount": "Azure.AI.Projects.Agents.VectorStoreFileCount", + 
"azure.ai.projects.models.VectorStoreFileDeletionStatus": "Azure.AI.Projects.Agents.VectorStoreFileDeletionStatus", + "azure.ai.projects.models.VectorStoreFileError": "Azure.AI.Projects.Agents.VectorStoreFileError", + "azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions": "Azure.AI.Projects.Agents.VectorStoreStaticChunkingStrategyOptions", + "azure.ai.projects.models.VectorStoreStaticChunkingStrategyRequest": "Azure.AI.Projects.Agents.VectorStoreStaticChunkingStrategyRequest", + "azure.ai.projects.models.VectorStoreStaticChunkingStrategyResponse": "Azure.AI.Projects.Agents.VectorStoreStaticChunkingStrategyResponse", + "azure.ai.projects.models.OpenApiAuthType": "Azure.AI.Projects.Agents.OpenApiAuthType", + "azure.ai.projects.models.VectorStoreDataSourceAssetType": "Azure.AI.Projects.Agents.VectorStoreDataSourceAssetType", + "azure.ai.projects.models.AgentsApiResponseFormatMode": "Azure.AI.Projects.Agents.AgentsApiResponseFormatMode", + "azure.ai.projects.models.ResponseFormat": "Azure.AI.Projects.Agents.ResponseFormat", + "azure.ai.projects.models.ListSortOrder": "Azure.AI.Projects.Agents.ListSortOrder", + "azure.ai.projects.models.MessageRole": "Azure.AI.Projects.Agents.MessageRole", + "azure.ai.projects.models.MessageStatus": "Azure.AI.Projects.Agents.MessageStatus", + "azure.ai.projects.models.MessageIncompleteDetailsReason": "Azure.AI.Projects.Agents.MessageIncompleteDetailsReason", + "azure.ai.projects.models.RunStatus": "Azure.AI.Projects.Agents.RunStatus", + "azure.ai.projects.models.IncompleteDetailsReason": "Azure.AI.Projects.Agents.IncompleteDetailsReason", + "azure.ai.projects.models.TruncationStrategy": "Azure.AI.Projects.Agents.TruncationStrategy", + "azure.ai.projects.models.AgentsApiToolChoiceOptionMode": "Azure.AI.Projects.Agents.AgentsApiToolChoiceOptionMode", + "azure.ai.projects.models.AgentsNamedToolChoiceType": "Azure.AI.Projects.Agents.AgentsNamedToolChoiceType", + "azure.ai.projects.models.RunAdditionalFieldList": 
"Azure.AI.Projects.Agents.RunAdditionalFieldList", + "azure.ai.projects.models.RunStepType": "Azure.AI.Projects.Agents.RunStepType", + "azure.ai.projects.models.RunStepStatus": "Azure.AI.Projects.Agents.RunStepStatus", + "azure.ai.projects.models.RunStepErrorCode": "Azure.AI.Projects.Agents.RunStepErrorCode", + "azure.ai.projects.models.FilePurpose": "Azure.AI.Projects.Agents.FilePurpose", + "azure.ai.projects.models.FileState": "Azure.AI.Projects.Agents.FileState", + "azure.ai.projects.models.VectorStoreStatus": "Azure.AI.Projects.Agents.VectorStoreStatus", + "azure.ai.projects.models.VectorStoreExpirationPolicyAnchor": "Azure.AI.Projects.Agents.VectorStoreExpirationPolicyAnchor", + "azure.ai.projects.models.VectorStoreChunkingStrategyRequestType": "Azure.AI.Projects.Agents.VectorStoreChunkingStrategyRequestType", + "azure.ai.projects.models.VectorStoreFileStatus": "Azure.AI.Projects.Agents.VectorStoreFileStatus", + "azure.ai.projects.models.VectorStoreFileErrorCode": "Azure.AI.Projects.Agents.VectorStoreFileErrorCode", + "azure.ai.projects.models.VectorStoreChunkingStrategyResponseType": "Azure.AI.Projects.Agents.VectorStoreChunkingStrategyResponseType", + "azure.ai.projects.models.VectorStoreFileStatusFilter": "Azure.AI.Projects.Agents.VectorStoreFileStatusFilter", + "azure.ai.projects.models.VectorStoreFileBatchStatus": "Azure.AI.Projects.Agents.VectorStoreFileBatchStatus", + "azure.ai.projects.models.AuthenticationType": "Azure.AI.Projects.AuthenticationType", + "azure.ai.projects.models.ConnectionType": "Azure.AI.Projects.ConnectionType", + "azure.ai.projects.models.Frequency": "Azure.AI.Projects.Frequency", + "azure.ai.projects.models.WeekDays": "Azure.AI.Projects.WeekDays", + "azure.ai.projects.models.ThreadStreamEvent": "Azure.AI.Projects.Agents.ThreadStreamEvent", + "azure.ai.projects.models.RunStreamEvent": "Azure.AI.Projects.Agents.RunStreamEvent", + "azure.ai.projects.models.RunStepStreamEvent": "Azure.AI.Projects.Agents.RunStepStreamEvent", + 
"azure.ai.projects.models.MessageStreamEvent": "Azure.AI.Projects.Agents.MessageStreamEvent", + "azure.ai.projects.models.ErrorEvent": "Azure.AI.Projects.Agents.ErrorEvent", + "azure.ai.projects.models.DoneEvent": "Azure.AI.Projects.Agents.DoneEvent", + "azure.ai.projects.models.AgentStreamEvent": "Azure.AI.Projects.Agents.AgentStreamEvent", + "azure.ai.projects.AIProjectClient.agents.create_agent": "Azure.AI.Projects.Agents.createAgent", + "azure.ai.projects.AIProjectClient.agents.list_agents": "Azure.AI.Projects.Agents.listAgents", + "azure.ai.projects.AIProjectClient.agents.get_agent": "Azure.AI.Projects.Agents.getAgent", + "azure.ai.projects.AIProjectClient.agents.update_agent": "Azure.AI.Projects.Agents.updateAgent", + "azure.ai.projects.AIProjectClient.agents.delete_agent": "Azure.AI.Projects.Agents.deleteAgent", + "azure.ai.projects.AIProjectClient.agents.create_thread": "Azure.AI.Projects.Agents.createThread", + "azure.ai.projects.AIProjectClient.agents.get_thread": "Azure.AI.Projects.Agents.getThread", + "azure.ai.projects.AIProjectClient.agents.update_thread": "Azure.AI.Projects.Agents.updateThread", + "azure.ai.projects.AIProjectClient.agents.delete_thread": "Azure.AI.Projects.Agents.deleteThread", + "azure.ai.projects.AIProjectClient.agents.create_message": "Azure.AI.Projects.Agents.createMessage", + "azure.ai.projects.AIProjectClient.agents.list_messages": "Azure.AI.Projects.Agents.listMessages", + "azure.ai.projects.AIProjectClient.agents.get_message": "Azure.AI.Projects.Agents.getMessage", + "azure.ai.projects.AIProjectClient.agents.update_message": "Azure.AI.Projects.Agents.updateMessage", + "azure.ai.projects.AIProjectClient.agents.create_run": "Azure.AI.Projects.Agents.createRun", + "azure.ai.projects.AIProjectClient.agents.list_runs": "Azure.AI.Projects.Agents.listRuns", + "azure.ai.projects.AIProjectClient.agents.get_run": "Azure.AI.Projects.Agents.getRun", + "azure.ai.projects.AIProjectClient.agents.update_run": 
"Azure.AI.Projects.Agents.updateRun", + "azure.ai.projects.AIProjectClient.agents.submit_tool_outputs_to_run": "Azure.AI.Projects.Agents.submitToolOutputsToRun", + "azure.ai.projects.AIProjectClient.agents.cancel_run": "Azure.AI.Projects.Agents.cancelRun", + "azure.ai.projects.AIProjectClient.agents.create_thread_and_run": "Azure.AI.Projects.Agents.createThreadAndRun", + "azure.ai.projects.AIProjectClient.agents.get_run_step": "Azure.AI.Projects.Agents.getRunStep", + "azure.ai.projects.AIProjectClient.agents.list_run_steps": "Azure.AI.Projects.Agents.listRunSteps", + "azure.ai.projects.AIProjectClient.agents.list_files": "Azure.AI.Projects.Agents.listFiles", + "azure.ai.projects.AIProjectClient.agents.upload_file": "Azure.AI.Projects.Agents.uploadFile", + "azure.ai.projects.AIProjectClient.agents.delete_file": "Azure.AI.Projects.Agents.deleteFile", + "azure.ai.projects.AIProjectClient.agents.get_file": "Azure.AI.Projects.Agents.getFile", + "azure.ai.projects.AIProjectClient.agents.list_vector_stores": "Azure.AI.Projects.Agents.listVectorStores", + "azure.ai.projects.AIProjectClient.agents.create_vector_store": "Azure.AI.Projects.Agents.createVectorStore", + "azure.ai.projects.AIProjectClient.agents.get_vector_store": "Azure.AI.Projects.Agents.getVectorStore", + "azure.ai.projects.AIProjectClient.agents.modify_vector_store": "Azure.AI.Projects.Agents.modifyVectorStore", + "azure.ai.projects.AIProjectClient.agents.delete_vector_store": "Azure.AI.Projects.Agents.deleteVectorStore", + "azure.ai.projects.AIProjectClient.agents.list_vector_store_files": "Azure.AI.Projects.Agents.listVectorStoreFiles", + "azure.ai.projects.AIProjectClient.agents.create_vector_store_file": "Azure.AI.Projects.Agents.createVectorStoreFile", + "azure.ai.projects.AIProjectClient.agents.get_vector_store_file": "Azure.AI.Projects.Agents.getVectorStoreFile", + "azure.ai.projects.AIProjectClient.agents.delete_vector_store_file": "Azure.AI.Projects.Agents.deleteVectorStoreFile", + 
"azure.ai.projects.AIProjectClient.agents.create_vector_store_file_batch": "Azure.AI.Projects.Agents.createVectorStoreFileBatch", + "azure.ai.projects.AIProjectClient.agents.get_vector_store_file_batch": "Azure.AI.Projects.Agents.getVectorStoreFileBatch", + "azure.ai.projects.AIProjectClient.agents.cancel_vector_store_file_batch": "Azure.AI.Projects.Agents.cancelVectorStoreFileBatch", + "azure.ai.projects.AIProjectClient.agents.list_vector_store_file_batch_files": "Azure.AI.Projects.Agents.listVectorStoreFileBatchFiles", + "azure.ai.projects.AIProjectClient.evaluations.get": "Azure.AI.Projects.Evaluations.get", + "azure.ai.projects.AIProjectClient.evaluations.create": "Azure.AI.Projects.Evaluations.create", + "azure.ai.projects.AIProjectClient.evaluations.list": "Azure.AI.Projects.Evaluations.list", + "azure.ai.projects.AIProjectClient.evaluations.update": "Azure.AI.Projects.Evaluations.update", + "azure.ai.projects.AIProjectClient.evaluations.get_schedule": "Azure.AI.Projects.Evaluations.getSchedule", + "azure.ai.projects.AIProjectClient.evaluations.create_or_replace_schedule": "Azure.AI.Projects.Evaluations.createOrReplaceSchedule", + "azure.ai.projects.AIProjectClient.evaluations.list_schedule": "Azure.AI.Projects.Evaluations.listSchedule", + "azure.ai.projects.AIProjectClient.evaluations.disable_schedule": "Azure.AI.Projects.Evaluations.disableSchedule" + } +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py index b1fa737e1512..b3e215c68df7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -62,7 +63,7 @@ def __init__( credential: "TokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" self._config = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 9bc0729de4da..f7dd32510333 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -6,294 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import uuid -from os import PathLike -from pathlib import Path -from typing import Any, Dict, List, Tuple, Union, Optional -from typing_extensions import Self +from typing import List -from azure.core import PipelineClient -from azure.core.credentials import TokenCredential -from azure.core.pipeline import policies - -from ._client import AIProjectClient as ClientGenerated -from ._configuration import AIProjectClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations -from .operations._patch import InferenceOperations - - -class AIProjectClient( - ClientGenerated -): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - def __init__( # pylint: disable=super-init-not-called,too-many-statements - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate 
input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id ID is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs0 = kwargs.copy() - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - self._user_agent: Optional[str] = kwargs.get("user_agent", None) - - # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the - # AzureML "Workspace - Get" REST API call. 
It will have the form: - # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} - _endpoint0 = "https://management.azure.com" - self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2020-02-02", - credential_scopes=["https://management.azure.com/.default"], - **kwargs0, - ) - - _policies0 = kwargs0.pop("policies", None) - if _policies0 is None: - _policies0 = [ - policies.RequestIdPolicy(**kwargs0), - self._config0.headers_policy, - self._config0.user_agent_policy, - self._config0.proxy_policy, - policies.ContentDecodePolicy(**kwargs0), - self._config0.redirect_policy, - self._config0.retry_policy, - self._config0.authentication_policy, - self._config0.custom_hook_policy, - self._config0.logging_policy, - policies.DistributedTracingPolicy(**kwargs0), - policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, - self._config0.http_logging_policy, - ] - self._client0: PipelineClient = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) - - # For Endpoints operations (listing connections, getting connection properties, getting project properties) - _endpoint1 = ( - "https://management.azure.com/" - + f"subscriptions/{subscription_id}/" - + f"resourceGroups/{resource_group_name}/" - + "providers/Microsoft.MachineLearningServices/" - + f"workspaces/{project_name}" - ) - self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com/.default"], - 
**kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, - self._config1.http_logging_policy, - ] - self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2 = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-12-01-preview", - credential_scopes=["https://ml.azure.com/.default"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, - self._config2.http_logging_policy, - ] - self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, 
**kwargs2) - - # For Cloud Evaluations operations - # cSpell:disable-next-line - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com/.default"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, - self._config3.http_logging_policy, - ] - self._client3: PipelineClient = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.telemetry = TelemetryOperations( - self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self - ) - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = 
InferenceOperations(self) - - def close(self) -> None: - self._client0.close() - self._client1.close() - self._client2.close() - self._client3.close() - - def __enter__(self) -> Self: - self._client0.__enter__() - self._client1.__enter__() - self._client2.__enter__() - self._client3.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client0.__exit__(*exc_details) - self._client1.__exit__(*exc_details) - self._client2.__exit__(*exc_details) - self._client3.__exit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self: - """ - Create an AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param TokenCredential credential: Credential used to authenticate requests to the service. - :return: An AIProjectClient instance. - :rtype: AIProjectClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. 
- :rtype: Tuple[str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - - ml_client = MLClient( - self._config3.credential, - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py index a066e16a64dd..e2a20b1d534c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # -------------------------------------------------------------------------- # # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -1361,7 +1361,7 @@ def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument # Iter and wrapped, should have found one node only (the wrap one) if len(children) != 1: raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( xml_name ) ) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index d17ec8abfb6f..be71c81bd282 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b6" +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py index a02ebf54047f..1057faa04d88 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -62,7 +63,7 @@ def __init__( credential: "AsyncTokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" self._config = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 61b035c5bec7..f7dd32510333 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -6,309 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import uuid -from os import PathLike -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union -from typing_extensions import Self +from typing import List -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies - -from .._serialization import Deserializer, Serializer -from ._client import AIProjectClient as ClientGenerated -from ._configuration import AIProjectClientConfiguration -from .operations import ( - AgentsOperations, - ConnectionsOperations, - EvaluationsOperations, - TelemetryOperations, -) -from .operations._patch import _SyncCredentialWrapper, InferenceOperations - -if TYPE_CHECKING: - from azure.core.credentials import AccessToken - from azure.core.credentials_async import AsyncTokenCredential - - -class AIProjectClient( - ClientGenerated -): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes - def __init__( # pylint: disable=super-init-not-called,too-many-statements - self, - endpoint: 
str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id ID is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs0 = kwargs.copy() - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - self._user_agent: Optional[str] = kwargs.get("user_agent", None) - - # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the - # AzureML "Workspace - Get" REST API call. 
It will have the form: - # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} # pylint: disable=line-too-long - _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long - self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2020-02-02", - credential_scopes=["https://management.azure.com/.default"], - **kwargs0, - ) - - _policies0 = kwargs0.pop("policies", None) - if _policies0 is None: - _policies0 = [ - policies.RequestIdPolicy(**kwargs0), - self._config0.headers_policy, - self._config0.user_agent_policy, - self._config0.proxy_policy, - policies.ContentDecodePolicy(**kwargs0), - self._config0.redirect_policy, - self._config0.retry_policy, - self._config0.authentication_policy, - self._config0.custom_hook_policy, - self._config0.logging_policy, - policies.DistributedTracingPolicy(**kwargs0), - (policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None), - self._config0.http_logging_policy, - ] - self._client0: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) - - # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" - self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com/.default"], - 
**kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - (policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None), - self._config1.http_logging_policy, - ] - self._client1: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-12-01-preview", - credential_scopes=["https://ml.azure.com/.default"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - (policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None), - self._config2.http_logging_policy, - ] - self._client2: AsyncPipelineClient = 
AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - # cSpell:disable-next-line - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3: AIProjectClientConfiguration = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com/.default"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - (policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None), - self._config3.http_logging_policy, - ] - self._client3: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.telemetry = TelemetryOperations( - self._client0, - self._config0, - self._serialize, - self._deserialize, - outer_instance=self, - ) - self._credential = credential - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - 
self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - async def close(self) -> None: - await self._client0.close() - await self._client1.close() - await self._client2.close() - await self._client3.close() - - async def __aenter__(self) -> Self: - await self._client0.__aenter__() - await self._client1.__aenter__() - await self._client2.__aenter__() - await self._client3.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client0.__aexit__(*exc_details) - await self._client1.__aexit__(*exc_details) - await self._client2.__aexit__(*exc_details) - await self._client3.__aexit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. - :return: An AIProjectClient instance. - :rtype: AIProjectClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. 
- :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - _SyncCredentialWrapper(self._config3.credential), - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 1e990007a1f0..0ea64bc608e5 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -449,11 +449,11 @@ async def list_agents( return deserialized # type: ignore @distributed_trace_async - async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: """Retrieves an existing agent. - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str + :param agent_id: Identifier of the agent. Required. + :type agent_id: str :return: Agent. The Agent is compatible with MutableMapping :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: @@ -472,7 +472,7 @@ async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: cls: ClsType[_models.Agent] = kwargs.pop("cls", None) _request = build_agents_get_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -516,7 +516,7 @@ async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: @overload async def update_agent( self, - assistant_id: str, + agent_id: str, *, content_type: str = "application/json", model: Optional[str] = None, @@ -533,8 +533,8 @@ async def update_agent( ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -584,12 +584,12 @@ async def update_agent( @overload async def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. 
- :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -602,12 +602,12 @@ async def update_agent( @overload async def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -621,7 +621,7 @@ async def update_agent( @distributed_trace_async async def update_agent( self, - assistant_id: str, + agent_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, model: Optional[str] = None, @@ -638,8 +638,8 @@ async def update_agent( ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Default value is None. 
@@ -721,7 +721,7 @@ async def update_agent( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_agents_update_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -765,11 +765,11 @@ async def update_agent( return deserialized # type: ignore @distributed_trace_async - async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: """Deletes an agent. - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str + :param agent_id: Identifier of the agent. Required. + :type agent_id: str :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping :rtype: ~azure.ai.projects.models.AgentDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: @@ -788,7 +788,7 @@ async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentD cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) _request = build_agents_delete_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1825,7 +1825,7 @@ async def create_run( self, thread_id: str, *, - assistant_id: str, + agent_id: str, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, @@ -1849,8 +1849,8 @@ async def create_run( :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str :keyword include: A list of additional fields to include in the response. 
Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -1997,7 +1997,7 @@ async def create_run( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - assistant_id: str = _Unset, + agent_id: str = _Unset, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -2022,8 +2022,8 @@ async def create_run( :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -2117,12 +2117,12 @@ async def create_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") body = { "additional_instructions": additional_instructions, "additional_messages": additional_messages, - "assistant_id": assistant_id, + "assistant_id": agent_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -2767,7 +2767,7 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model async def create_thread_and_run( self, *, - assistant_id: str, + agent_id: str, content_type: str = "application/json", thread: Optional[_models.AgentThreadCreationOptions] = None, model: Optional[str] = None, @@ -2788,8 +2788,8 @@ async def create_thread_and_run( ) -> _models.ThreadRun: """Creates 
a new agent thread and immediately starts a run using that new thread. - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -2903,7 +2903,7 @@ async def create_thread_and_run( self, body: Union[JSON, IO[bytes]] = _Unset, *, - assistant_id: str = _Unset, + agent_id: str = _Unset, thread: Optional[_models.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -2925,8 +2925,8 @@ async def create_thread_and_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions @@ -3014,10 +3014,10 @@ async def create_thread_and_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") body = { - "assistant_id": assistant_id, + "assistant_id": agent_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -5144,7 +5144,8 @@ async def _list_connections( """List the details of all the connections (not including their credentials). 
:keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". Default value is None. + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". Default value is + None. :paramtype category: str or ~azure.ai.projects.models.ConnectionType :keyword include_all: Indicates whether to list datastores. Service default: do not list datastores. Default value is None. @@ -5404,7 +5405,6 @@ def __init__(self, *args, **kwargs) -> None: async def _get_app_insights( self, app_insights_resource_url: str, **kwargs: Any ) -> _models._models.GetAppInsightsResponse: - # pylint: disable=line-too-long """Gets the properties of the specified Application Insights resource. :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index edc45b644252..f7dd32510333 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -7,3167 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import concurrent.futures -import io -import logging -import os -import time -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - AsyncIterator, - Dict, - List, - MutableMapping, - Optional, - Sequence, - TextIO, - Union, - cast, - overload, -) +from typing import List -from azure.core.credentials import TokenCredential -from azure.core.exceptions import ResourceNotFoundError -from azure.core.tracing.decorator_async import distributed_trace_async - -from ... 
import models as _models -from ..._vendor import FileType -from ...models._enums import AuthenticationType, ConnectionType, FilePurpose, RunStatus -from ...models._models import ( - GetAppInsightsResponse, - GetConnectionResponse, - GetWorkspaceResponse, - InternalConnectionPropertiesSASAuth, - ListConnectionsResponse, -) -from ...models._patch import ConnectionProperties -from ...operations._patch import _enable_telemetry -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from openai import AsyncAzureOpenAI - - from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient - from azure.ai.projects import _types - from azure.core.credentials import AccessToken - from azure.core.credentials_async import AsyncTokenCredential - -logger = logging.getLogger(__name__) - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - - -class InferenceOperations: - - def __init__(self, outer_instance): - - # All returned inference clients will have this application id set on their user-agent. - # For more info on user-agent HTTP header, see: - # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy - USER_AGENT_APP_ID = "AIProjectClient" - - if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent: - # If the calling application has set "user_agent" when constructing the AIProjectClient, - # take that value and prepend it to USER_AGENT_APP_ID. 
- self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}" - else: - self._user_agent = USER_AGENT_APP_ID - - self._outer_instance = outer_instance - - @distributed_trace_async - async def get_chat_completions_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ChatCompletionsClient": - """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - ChatCompletionsClient. - - At least one AI model that supports chat completions must be deployed in this resource. - - .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated chat completions client. - :rtype: ~azure.ai.inference.ChatCompletionsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. 
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True - ) - else: - if use_serverless_connection: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection)) - - try: - from azure.ai.inference.aio import ChatCompletionsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_chat_completions_client]" - + " Creating ChatCompletionsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_chat_completions_client]" - + " Creating ChatCompletionsClient using Entra ID authentication" - ) - client = ChatCompletionsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - 
user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using SAS authentication" - ) - raise ValueError( - "Getting chat completions client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace_async - async def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> "EmbeddingsClient": - """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - EmbeddingsClient. - - At least one AI model that supports text embeddings must be deployed in this resource. - - .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated text embeddings client - :rtype: ~azure.ai.inference.EmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. 
- :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True - ) - else: - if use_serverless_connection: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference.aio import EmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = EmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" - ) - client = EmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" - ) - raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace_async - async def get_image_embeddings_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ImageEmbeddingsClient": - """Get an authenticated asynchronous ImageEmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. 
Keyword arguments are passed to the constructor of - ImageEmbeddingsClient. - - At least one AI model that supports image embeddings must be deployed in this resource. - - .. note:: The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated image embeddings client - :rtype: ~azure.ai.inference.ImageEmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. 
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True - ) - else: - if use_serverless_connection: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference.aio import ImageEmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using Entra ID authentication" - ) - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", 
self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using SAS authentication" - ) - raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace_async - async def get_azure_openai_client( - self, *, api_version: Optional[str] = None, connection_name: Optional[str] = None, **kwargs - ) -> "AsyncAzureOpenAI": - """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default - Azure OpenAI connection (if `connection_name` is not specificed), or from the Azure OpenAI - resource given by its connection name. - - .. note:: The package `openai` must be installed prior to calling this method. - - :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. - See "Data plane - Inference" row in the table at - https://learn.microsoft.com/azure/ai-services/openai/reference#api-specs. If this keyword - is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. - :paramtype api_version: str - :keyword connection_name: The name of a connection to an Azure OpenAI resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure OpenAI connection will be used. - :type connection_name: str - - :return: An authenticated AsyncAzureOpenAI client - :rtype: ~openai.AsyncAzureOpenAI - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure OpenAI connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `openai` package - is not installed. - :raises ValueError: if the connection name is an empty string. 
- :raises ~azure.core.exceptions.HttpResponseError: - - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - if connection_name: - connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) - else: - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs - ) - - logger.debug("[InferenceOperations.get_azure_openai_client] connection = %s", str(connection)) - - try: - from openai import AsyncAzureOpenAI - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenAI SDK is not installed. Please install it using 'pip install openai-async'" - ) from e - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" - ) - client = AsyncAzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using Entra ID authentication" - ) - try: - from azure.identity.aio import get_bearer_token_provider - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "azure.identity package not installed. 
Please install it using 'pip install azure-identity'" - ) from e - client = AsyncAzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using SAS authentication" - ) - raise ValueError( - "Getting an AzureOpenAI client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - -class ConnectionsOperations(ConnectionsOperationsGenerated): - - @distributed_trace_async - async def get_default( - self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if there are no connections of the given type. - - .. note:: - `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not - currently work. It does work with `include_credentials=False`. - - :keyword connection_type: The connection type. Required. - :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :type include_credentials: bool - :return: The connection properties. 
- :rtype: ~azure.ai.projects.model.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_type: - raise ValueError("You must specify an connection type") - # Since there is no notion of default connection at the moment, list all connections in the category - # and return the first one (index 0), unless overridden by the environment variable DEFAULT_CONNECTION_INDEX. - connection_properties_list = await self.list(connection_type=connection_type, **kwargs) - if len(connection_properties_list) > 0: - default_connection_index = int(os.getenv("DEFAULT_CONNECTION_INDEX", "0")) - if include_credentials: - return await self.get( - connection_name=connection_properties_list[default_connection_index].name, - include_credentials=include_credentials, - **kwargs, - ) - return connection_properties_list[default_connection_index] - raise ResourceNotFoundError(f"No connection of type {connection_type} found") - - @distributed_trace_async - async def get( - self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if a connection with the given name was not found. - - .. note:: This method is not supported for Azure Blob Storage connections. - - :keyword connection_name: Connection Name. Required. - :type connection_name: str - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :type include_credentials: bool - :return: The connection properties, or `None` if a connection with this name does not exist. 
- :rtype: ~azure.ai.projects.models.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_name: - raise ValueError("Connection name cannot be empty") - if include_credentials: - connection: GetConnectionResponse = await self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - if connection.properties.auth_type == AuthenticationType.ENTRA_ID: - return ConnectionProperties(connection=connection, token_credential=self._config.credential) - if connection.properties.auth_type == AuthenticationType.SAS: - from ...models._patch import SASTokenCredential - - cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) - sync_credential = _SyncCredentialWrapper(self._config.credential) - - token_credential = SASTokenCredential( - sas_token=cred_prop.credentials.sas, - credential=sync_credential, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - project_name=self._config.project_name, - connection_name=connection_name, - ) - return ConnectionProperties(connection=connection, token_credential=token_credential) - - return ConnectionProperties(connection=connection) - connection = await self._get_connection(connection_name=connection_name, **kwargs) - return ConnectionProperties(connection=connection) - - @distributed_trace_async - async def list( - self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any - ) -> Sequence[ConnectionProperties]: - """List the properties of all connections, or all connections of a certain connection type. - - :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this - type. If not provided, all connections are listed. 
# NOTE(review): this chunk was reconstructed from a whitespace-mangled diff fragment.
# The tail of the preceding connection-listing helper (its `def` line is outside this
# chunk) and the body of the final `create_run` implementation (it continues past this
# chunk) are not reproduced here.


class TelemetryOperations(TelemetryOperationsGenerated):
    """Customized telemetry operations for an AI Foundry project: lazy lookup of the
    Application Insights connection string, and local OpenTelemetry enablement."""

    # Cached Application Insights connection string; fetched lazily, at most once.
    _connection_string: Optional[str] = None

    def __init__(self, *args, **kwargs) -> None:
        # The owning project client, needed to reach the sibling `connections` group.
        self._outer_instance = kwargs.pop("outer_instance")
        super().__init__(*args, **kwargs)

    async def get_connection_string(self) -> str:
        """Get the Application Insights connection string associated with the Project's
        Application Insights resource.

        :return: The Application Insights connection string if the resource was enabled
         for the Project.
        :rtype: str
        :raises ~azure.core.exceptions.ResourceNotFoundError: Application Insights resource
         was not enabled for this project.
        """
        if not self._connection_string:
            # Get the AI Foundry project properties, including the Application Insights
            # resource URL, if one exists.
            get_workspace_response: GetWorkspaceResponse = (
                await self._outer_instance.connections._get_workspace()  # pylint: disable=protected-access
            )

            if not get_workspace_response.properties.application_insights:
                raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.")

            # Make a GET call to the Application Insights resource URL to get the
            # connection string.  (Fixed local-name typo: was `app_insights_respose`.)
            app_insights_response: GetAppInsightsResponse = await self._get_app_insights(
                app_insights_resource_url=get_workspace_response.properties.application_insights
            )

            self._connection_string = app_insights_response.properties.connection_string

        return self._connection_string

    # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`?
    # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry
    def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None:
        """Enables distributed tracing and logging with OpenTelemetry for Azure AI clients and
        popular GenAI libraries.

        The following instrumentations are enabled (when the corresponding packages are
        installed):

        - Azure AI Inference (`azure-ai-inference`)
        - Azure AI Projects (`azure-ai-projects`)
        - OpenAI (`opentelemetry-instrumentation-openai-v2`)
        - Langchain (`opentelemetry-instrumentation-langchain`)

        Recording of prompt and completion messages is disabled by default. To enable it,
        set the `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to
        `true`.

        When a destination is provided, the method configures the OpenTelemetry SDK to
        export traces to stdout or to an OTLP (OpenTelemetry protocol) gRPC endpoint.
        Recommended for local development only; for production, configure the
        OpenTelemetry SDK directly.

        :keyword destination: Recommended for local testing only. Set it to `sys.stdout` to
         print traces and logs to console output, or a string holding the OTLP endpoint
         such as "http://localhost:4317". If not provided, the method enables
         instrumentations but does not configure the OpenTelemetry SDK to export traces
         and logs.
        :paramtype destination: Union[TextIO, str, None]
        """
        _enable_telemetry(destination=destination, **kwargs)


class AgentsOperations(AgentsOperationsGenerated):
    """Customized agent operations adding ``toolset`` support (tool definitions,
    resources, and automatic function execution) on top of the generated operations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Maps agent id -> toolset, so tool calls can be auto-executed during runs.
        self._toolset: Dict[str, _models.AsyncToolSet] = {}

    # pylint: disable=arguments-differ
    @overload
    async def create_agent(  # pylint: disable=arguments-differ
        self,
        *,
        model: str,
        content_type: str = "application/json",
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.Agent:
        """Creates a new agent from explicit tool definitions and tool resources.

        :keyword model: The ID of the model to use. Required.
        :paramtype model: str
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword name: The name of the new agent. Default value is None.
        :paramtype name: str
        :keyword description: The description of the new agent. Default value is None.
        :paramtype description: str
        :keyword instructions: The system instructions for the new agent to use. Default value is None.
        :paramtype instructions: str
        :keyword tools: The collection of tools to enable for the new agent. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword tool_resources: Resources used by the agent's tools, specific to each tool type
         (e.g. file IDs for ``code_interpreter``, vector store IDs for ``file_search``).
         Default value is None.
        :paramtype tool_resources: ~azure.ai.projects.models.ToolResources
        :keyword temperature: Sampling temperature to use, between 0 and 2; higher values make the
         output more random, lower values more focused and deterministic. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature: only the tokens comprising
         the top_p probability mass are considered. Alter this or temperature, not both.
         Default value is None.
        :paramtype top_p: float
        :keyword response_format: The response format of the tool calls used by this agent.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword metadata: Up to 16 key/value pairs of additional structured information (keys up
         to 64 characters, values up to 512 characters). Default value is None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # pylint: disable=arguments-differ
    @overload
    async def create_agent(  # pylint: disable=arguments-differ
        self,
        *,
        model: str,
        content_type: str = "application/json",
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        toolset: Optional[_models.AsyncToolSet] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.Agent:
        """Creates a new agent from a toolset.

        :keyword model: The ID of the model to use. Required.
        :paramtype model: str
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword name: The name of the new agent. Default value is None.
        :paramtype name: str
        :keyword description: The description of the new agent. Default value is None.
        :paramtype description: str
        :keyword instructions: The system instructions for the new agent to use. Default value is None.
        :paramtype instructions: str
        :keyword toolset: The collection of tools and resources (alternative to `tools` and
         `tool_resources`; adds automatic execution logic for functions). Default value is None.
        :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet
        :keyword temperature: Sampling temperature to use, between 0 and 2. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature. Default value is None.
        :paramtype top_p: float
        :keyword response_format: The response format of the tool calls used by this agent.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword metadata: Up to 16 key/value pairs of additional structured information.
         Default value is None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
        """Creates a new agent from a JSON body.

        :param body: Required.
        :type body: JSON
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_agent(
        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.Agent:
        """Creates a new agent from a binary body.

        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body parameter content-type for binary body. Default value is
         "application/json".
        :paramtype content_type: str
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_agent(
        self,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        model: str = _Unset,
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        toolset: Optional[_models.AsyncToolSet] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        metadata: Optional[Dict[str, str]] = None,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.Agent:
        """Creates a new agent, delegating to the generated operations.

        Exactly one of `body` or `model` must be provided; when `toolset` is given it
        supplies both `tools` and `tool_resources` and is remembered for automatic
        function execution.

        :param body: JSON or IO[bytes]. Required if `model` is not provided.
        :type body: Union[JSON, IO[bytes]]
        :keyword model: The ID of the model to use. Required if `body` is not provided.
        :paramtype model: str
        :keyword name: The name of the new agent. Default value is None.
        :paramtype name: str
        :keyword description: A description for the new agent. Default value is None.
        :paramtype description: str
        :keyword instructions: System instructions for the agent. Default value is None.
        :paramtype instructions: str
        :keyword tools: List of tool definitions for the agent. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword tool_resources: Resources used by the agent's tools. Default value is None.
        :paramtype tool_resources: ~azure.ai.projects.models.ToolResources
        :keyword toolset: Collection of tools and resources (alternative to `tools` and
         `tool_resources`; adds automatic execution logic for functions). Default value is None.
        :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet
        :keyword temperature: Sampling temperature for generating agent responses. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus sampling parameter. Default value is None.
        :paramtype top_p: float
        :keyword response_format: Response format for tool calls. Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword metadata: Key/value pairs for storing additional information. Default value is None.
        :paramtype metadata: dict[str, str]
        :keyword content_type: Content type of the body. Default value is "application/json".
        :paramtype content_type: str
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        if body is not _Unset:
            # Raw-body path: forward to the generated operation unchanged.
            if isinstance(body, io.IOBase):
                return await super().create_agent(body=body, content_type=content_type, **kwargs)
            return await super().create_agent(body=body, **kwargs)

        # NOTE(review): unlike update_agent, tools/tool_resources are not cross-validated
        # here — confirm whether that asymmetry is intentional.
        if toolset is not None:
            tools = toolset.definitions
            tool_resources = toolset.resources

        new_agent = await super().create_agent(
            model=model,
            name=name,
            description=description,
            instructions=instructions,
            tools=tools,
            tool_resources=tool_resources,
            temperature=temperature,
            top_p=top_p,
            response_format=response_format,
            metadata=metadata,
            **kwargs,
        )

        if toolset is not None:
            # Remember the toolset so tool calls can be auto-executed for this agent.
            self._toolset[new_agent.id] = toolset
        return new_agent

    # pylint: disable=arguments-differ
    @overload
    async def update_agent(  # pylint: disable=arguments-differ
        self,
        assistant_id: str,
        *,
        content_type: str = "application/json",
        model: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.Agent:
        """Modifies an existing agent using explicit tool definitions and tool resources.

        :param assistant_id: The ID of the agent to modify. Required.
        :type assistant_id: str
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword model: The ID of the model to use. Default value is None.
        :paramtype model: str
        :keyword name: The modified name for the agent to use. Default value is None.
        :paramtype name: str
        :keyword description: The modified description for the agent to use. Default value is None.
        :paramtype description: str
        :keyword instructions: The modified system instructions for the agent to use. Default value is None.
        :paramtype instructions: str
        :keyword tools: The modified collection of tools to enable for the agent. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword tool_resources: Resources used by the agent's tools, specific to each tool type.
         Default value is None.
        :paramtype tool_resources: ~azure.ai.projects.models.ToolResources
        :keyword temperature: Sampling temperature to use, between 0 and 2. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature. Default value is None.
        :paramtype top_p: float
        :keyword response_format: The response format of the tool calls used by this agent.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword metadata: Up to 16 key/value pairs of additional structured information.
         Default value is None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # pylint: disable=arguments-differ
    @overload
    async def update_agent(  # pylint: disable=arguments-differ
        self,
        assistant_id: str,
        *,
        content_type: str = "application/json",
        model: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        toolset: Optional[_models.AsyncToolSet] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.Agent:
        """Modifies an existing agent using a toolset.

        :param assistant_id: The ID of the agent to modify. Required.
        :type assistant_id: str
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword model: The ID of the model to use. Default value is None.
        :paramtype model: str
        :keyword name: The modified name for the agent to use. Default value is None.
        :paramtype name: str
        :keyword description: The modified description for the agent to use. Default value is None.
        :paramtype description: str
        :keyword instructions: The modified system instructions for the agent to use. Default value is None.
        :paramtype instructions: str
        :keyword toolset: The collection of tools and resources (alternative to `tools` and
         `tool_resources`; adds automatic execution logic for functions). Default value is None.
        :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet
        :keyword temperature: Sampling temperature to use, between 0 and 2. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature. Default value is None.
        :paramtype top_p: float
        :keyword response_format: The response format of the tool calls used by this agent.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword metadata: Up to 16 key/value pairs of additional structured information.
         Default value is None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_agent(
        self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.Agent:
        """Modifies an existing agent from a JSON body.

        :param assistant_id: The ID of the agent to modify. Required.
        :type assistant_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_agent(
        self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.Agent:
        """Modifies an existing agent from a binary body.

        :param assistant_id: The ID of the agent to modify. Required.
        :type assistant_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body parameter content-type for binary body. Default value is
         "application/json".
        :paramtype content_type: str
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def update_agent(
        self,
        assistant_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        model: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        instructions: Optional[str] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        toolset: Optional[_models.AsyncToolSet] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        content_type: str = "application/json",
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.Agent:
        """Modifies an existing agent, delegating to the generated operations.

        :param assistant_id: The ID of the agent to modify. Required.
        :type assistant_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword model: The ID of the model to use. Default value is None.
        :paramtype model: str
        :keyword name: The modified name for the agent to use. Default value is None.
        :paramtype name: str
        :keyword description: The modified description for the agent to use. Default value is None.
        :paramtype description: str
        :keyword instructions: The modified system instructions for the agent to use. Default value is None.
        :paramtype instructions: str
        :keyword tools: The modified collection of tools to enable for the agent. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword tool_resources: Resources used by the agent's tools. Default value is None.
        :paramtype tool_resources: ~azure.ai.projects.models.ToolResources
        :keyword toolset: The collection of tools and resources (alternative to `tools` and
         `tool_resources`; adds automatic execution logic for functions). Default value is None.
        :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet
        :keyword temperature: Sampling temperature to use, between 0 and 2. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature. Default value is None.
        :paramtype top_p: float
        :keyword response_format: The response format of the tool calls used by this agent.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword metadata: Up to 16 key/value pairs of additional structured information.
         Default value is None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        self._validate_tools_and_tool_resources(tools, tool_resources)

        if body is not _Unset:
            # Raw-body path: forward to the generated operation unchanged.
            if isinstance(body, io.IOBase):
                return await super().update_agent(body=body, content_type=content_type, **kwargs)
            return await super().update_agent(body=body, **kwargs)

        if toolset is not None:
            # Remember the toolset for auto-execution and expand it into tools/resources.
            self._toolset[assistant_id] = toolset
            tools = toolset.definitions
            tool_resources = toolset.resources

        return await super().update_agent(
            assistant_id=assistant_id,
            model=model,
            name=name,
            description=description,
            instructions=instructions,
            tools=tools,
            tool_resources=tool_resources,
            temperature=temperature,
            top_p=top_p,
            response_format=response_format,
            metadata=metadata,
            **kwargs,
        )

    def _validate_tools_and_tool_resources(
        self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]
    ):
        """Ensure each provided tool resource has a matching tool definition.

        :raises ValueError: if ``tool_resources.file_search`` or
         ``tool_resources.code_interpreter`` is set without the corresponding tool.
        """
        if tool_resources is None:
            return
        if tools is None:
            tools = []

        if tool_resources.file_search is not None and not any(
            isinstance(tool, _models.FileSearchToolDefinition) for tool in tools
        ):
            raise ValueError(
                "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided"
            )
        if tool_resources.code_interpreter is not None and not any(
            isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools
        ):
            raise ValueError(
                "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided"
            )

    # pylint: disable=arguments-differ
    @overload
    async def create_run(  # pylint: disable=arguments-differ
        self,
        thread_id: str,
        *,
        assistant_id: str,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        model: Optional[str] = None,
        instructions: Optional[str] = None,
        additional_instructions: Optional[str] = None,
        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_prompt_tokens: Optional[int] = None,
        max_completion_tokens: Optional[int] = None,
        truncation_strategy: Optional[_models.TruncationObject] = None,
        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        parallel_tool_calls: Optional[bool] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an agent thread.

        :param thread_id: Required.
        :type thread_id: str
        :keyword assistant_id: The ID of the agent that should run the thread. Required.
        :paramtype assistant_id: str
        :keyword include: A list of additional fields to include in the response. Currently the
         only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to
         fetch the file search result content. Default value is None.
        :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :keyword model: The overridden model name that the agent should use to run the thread.
         Default value is None.
        :paramtype model: str
        :keyword instructions: The overridden system instructions that the agent should use to run
         the thread. Default value is None.
        :paramtype instructions: str
        :keyword additional_instructions: Additional instructions appended at the end of the
         instructions for the run, useful for per-run behavior changes without overriding other
         instructions. Default value is None.
        :paramtype additional_instructions: str
        :keyword additional_messages: Adds additional messages to the thread before creating the
         run. Default value is None.
        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions]
        :keyword tools: The overridden list of enabled tools that the agent should use to run the
         thread. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword temperature: Sampling temperature to use, between 0 and 2. Default value is None.
        :paramtype temperature: float
        :keyword top_p: Nucleus-sampling alternative to temperature. Default value is None.
        :paramtype top_p: float
        :keyword max_prompt_tokens: Maximum number of prompt tokens that may be used over the
         course of the run; if exceeded, the run ends with status ``incomplete`` (see
         ``incomplete_details``). Default value is None.
        :paramtype max_prompt_tokens: int
        :keyword max_completion_tokens: Maximum number of completion tokens that may be used over
         the course of the run; if exceeded, the run ends with status ``incomplete`` (see
         ``incomplete_details``). Default value is None.
        :paramtype max_completion_tokens: int
        :keyword truncation_strategy: The strategy for dropping messages as the context window
         moves forward. Default value is None.
        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
        :keyword tool_choice: Controls whether and which tool is called by the model.
         Default value is None.
        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode
         or ~azure.ai.projects.models.AgentsNamedToolChoice
        :keyword response_format: Specifies the format that the model must output.
         Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
         Default value is None.
        :paramtype parallel_tool_calls: bool
        :keyword metadata: Up to 16 key/value pairs of additional structured information.
         Default value is None.
        :paramtype metadata: dict[str, str]
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_run(
        self,
        thread_id: str,
        body: JSON,
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an agent thread from a JSON body.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword include: A list of additional fields to include in the response. Currently the
         only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to
         fetch the file search result content. Default value is None.
        :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
        :keyword content_type: Body parameter content-type. Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_run(
        self,
        thread_id: str,
        body: IO[bytes],
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an agent thread from a binary body.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword include: A list of additional fields to include in the response. Currently the
         only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to
         fetch the file search result content. Default value is None.
        :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
        :keyword content_type: Body parameter content-type for binary body. Default value is
         "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # NOTE(review): the @distributed_trace_async create_run implementation that follows
    # these overloads continues beyond this chunk and is not reproduced here.
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return await response - - @distributed_trace_async - async def create_and_process_run( - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. 
- :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. 
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.projects.models.AgentsApiResponseFormatMode or - ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :keyword sleep_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype sleep_interval: int - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - # Create and initiate the run with additional parameters - run = await self.create_run( - thread_id=thread_id, - assistant_id=assistant_id, - include=include, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=toolset.definitions if toolset else None, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Monitor and process the run status - while run.status in [ - RunStatus.QUEUED, - RunStatus.IN_PROGRESS, - RunStatus.REQUIRES_ACTION, - ]: - time.sleep(sleep_interval) - run = await self.get_run(thread_id=thread_id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logging.warning("No tool calls provided - cancelling run") - await self.cancel_run(thread_id=thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = toolset or self._toolset.get(run.assistant_id) - if toolset: - tool_outputs = await toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - logging.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - await self.submit_tool_outputs_to_run( - thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs - ) - - logging.info("Current run status: %s", run.status) - - return run - - @overload - async def create_stream( - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: None = None, - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. 
- Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - **kwargs: Any, - ) -> 
_models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: None = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.AsyncAgentEventHandler]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: None - :paramtype event_handler: None. _models.AsyncAgentEventHandler will be applied as default. - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: _models.BaseAsyncAgentEventHandlerT, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a `data: [DONE]` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
        :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_stream(  # pyright: ignore[reportInconsistentOverload]
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        assistant_id: str = _Unset,
        model: Optional[str] = None,
        instructions: Optional[str] = None,
        additional_instructions: Optional[str] = None,
        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_prompt_tokens: Optional[int] = None,
        max_completion_tokens: Optional[int] = None,
        truncation_strategy: Optional[_models.TruncationObject] = None,
        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        parallel_tool_calls: Optional[bool] = None,
        metadata: Optional[Dict[str, str]] = None,
        event_handler: Optional[_models.BaseAsyncAgentEventHandlerT] = None,
        **kwargs: Any,
    ) -> _models.AsyncAgentRunStream[_models.BaseAsyncAgentEventHandlerT]:
        """Creates a new run for an agent thread.

        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList]
        :keyword assistant_id: The ID of the agent that should run the thread. Required.
        :paramtype assistant_id: str
        :keyword model: The overridden model name that the agent should use to run the thread. Default
         value is None.
        :paramtype model: str
        :keyword instructions: The overridden system instructions that the agent should use to run the
         thread. Default value is None.
        :paramtype instructions: str
        :keyword additional_instructions: Additional instructions to append at the end of the
         instructions for the run. This is useful for modifying the behavior
         on a per-run basis without overriding other instructions. Default value is None.
        :paramtype additional_instructions: str
        :keyword additional_messages: Adds additional messages to the thread before creating the run.
         Default value is None.
        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions]
        :keyword tools: The overridden list of enabled tools that the agent should use to run the
         thread. Default value is None.
        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
         will make the output
         more random, while lower values like 0.2 will make it more focused and deterministic. Default
         value is None.
        :paramtype temperature: float
        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
         model
         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
         comprising the top 10% probability mass are considered.

         We generally recommend altering this or temperature but not both. Default value is None.
        :paramtype top_p: float
        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
         course of the run. The run will make a best effort to use only
         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
         the number of prompt tokens specified,
         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
         value is None.
        :paramtype max_prompt_tokens: int
        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
         the course of the run. The run will make a best effort
         to use only the number of completion tokens specified, across multiple turns of the run. If
         the run exceeds the number of
         completion tokens specified, the run will end with status ``incomplete``. See
         ``incomplete_details`` for more info. Default value is None.
        :paramtype max_completion_tokens: int
        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
         moves forward. Default value is None.
        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
         AgentsNamedToolChoice Default value is None.
        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
         ~azure.ai.projects.models.AgentsNamedToolChoice
        :keyword response_format: Specifies the format that the model must output. Is one of the
         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
         AgentsApiResponseFormat Default value is None.
        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
         or ~azure.ai.projects.models.AgentsApiResponseFormat
        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
         Default value is None.
        :paramtype parallel_tool_calls: bool
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :keyword event_handler: The event handler to use for processing events during the run. Default
         value is None.
        :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
        :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
        :raises ~azure.core.exceptions.HttpResponseError:
        """

        # Dispatch on how the caller supplied the request. NOTE(review): the dict and
        # IO[bytes] branches forward the body as-is and do not force stream=True here —
        # presumably the raw body already carries the streaming flag; confirm with callers.
        if isinstance(body, dict):  # Handle overload with JSON body.
            content_type = kwargs.get("content_type", "application/json")
            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)

        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
            # Keyword-argument form: explicitly request a streamed run from the service.
            response = super().create_run(
                thread_id,
                assistant_id=assistant_id,
                include=include,
                model=model,
                instructions=instructions,
                additional_instructions=additional_instructions,
                additional_messages=additional_messages,
                tools=tools,
                stream_parameter=True,
                stream=True,
                temperature=temperature,
                top_p=top_p,
                max_prompt_tokens=max_prompt_tokens,
                max_completion_tokens=max_completion_tokens,
                truncation_strategy=truncation_strategy,
                tool_choice=tool_choice,
                response_format=response_format,
                parallel_tool_calls=parallel_tool_calls,
                metadata=metadata,
                **kwargs,
            )

        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
            content_type = kwargs.get("content_type", "application/json")
            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)

        else:
            raise ValueError("Invalid combination of arguments provided.")

        # The generated create_run returns an awaitable; awaiting it yields the raw
        # byte stream of server-sent events that the run-stream wrapper consumes.
        response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)

        # Fall back to the default event handler when the caller did not supply one.
        if not event_handler:
            event_handler = cast(_models.BaseAsyncAgentEventHandlerT, _models.AsyncAgentEventHandler())

        # _handle_submit_tool_outputs is passed in so the stream can auto-submit tool
        # outputs when the run pauses with a requires_action state.
        return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)

    # pylint: disable=arguments-differ
    @overload
    async def submit_tool_outputs_to_run(  # pylint: disable=arguments-differ
        self,
        thread_id: str,
        run_id: str,
        *,
        tool_outputs: List[_models.ToolOutput],
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :keyword tool_outputs: Required.
        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def submit_tool_outputs_to_run(
        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadRun:
        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'.
        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def submit_tool_outputs_to_run(
        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadRun:
        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def submit_tool_outputs_to_run(
        self,
        thread_id: str,
        run_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        tool_outputs: List[_models.ToolOutput] = _Unset,
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword tool_outputs: Required.
        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

        # Non-streaming variant: when the keyword overload is used we explicitly set
        # stream=False so the service returns a single ThreadRun document.
        if isinstance(body, dict):
            content_type = kwargs.get("content_type", "application/json")
            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)

        elif tool_outputs is not _Unset:
            response = super().submit_tool_outputs_to_run(
                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
            )

        elif isinstance(body, io.IOBase):
            content_type = kwargs.get("content_type", "application/json")
            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)

        else:
            raise ValueError("Invalid combination of arguments provided.")

        return await response

    @overload
    async def submit_tool_outputs_to_stream(
        self,
        thread_id: str,
        run_id: str,
        body: Union[JSON, IO[bytes]],
        *,
        event_handler: _models.BaseAsyncAgentEventHandler,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> None:
        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword event_handler: The event handler to use for processing events during the run. Default
         value is None.
        :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def submit_tool_outputs_to_stream(
        self,
        thread_id: str,
        run_id: str,
        *,
        tool_outputs: List[_models.ToolOutput],
        content_type: str = "application/json",
        event_handler: _models.BaseAsyncAgentEventHandler,
        **kwargs: Any,
    ) -> None:
        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :keyword tool_outputs: Required.
        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword event_handler: The event handler to use for processing events during the run.
        :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def submit_tool_outputs_to_stream(  # pyright: ignore[reportInconsistentOverload]
        self,
        thread_id: str,
        run_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        tool_outputs: List[_models.ToolOutput] = _Unset,
        event_handler: _models.BaseAsyncAgentEventHandler,
        **kwargs: Any,
    ) -> None:
        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
        outputs will have a status of 'requires_action' with a required_action.type of
        'submit_tool_outputs'.
        terminating when the Run enters a terminal state with a ``data: [DONE]`` message.

        :param thread_id: Required.
        :type thread_id: str
        :param run_id: Required.
        :type run_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword tool_outputs: Required.
        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
        :keyword event_handler: The event handler to use for processing events during the run.
        :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
        :raises ~azure.core.exceptions.HttpResponseError:
        """

        # Streaming variant: the keyword overload explicitly requests a streamed
        # response (stream=True), unlike submit_tool_outputs_to_run above.
        if isinstance(body, dict):
            content_type = kwargs.get("content_type", "application/json")
            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)

        elif tool_outputs is not _Unset:
            response = super().submit_tool_outputs_to_run(
                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
            )

        elif isinstance(body, io.IOBase):
            content_type = kwargs.get("content_type", "application/json")
            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)

        else:
            raise ValueError("Invalid combination of arguments provided.")

        # Cast the response to Iterator[bytes] for type correctness
        response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)

        # Re-point the existing handler at the new event stream; the callback lets the
        # handler recursively auto-submit further tool outputs if the run pauses again.
        event_handler.initialize(response_iterator, self._handle_submit_tool_outputs)

    async def _handle_submit_tool_outputs(
        self, run: _models.ThreadRun, event_handler: _models.BaseAsyncAgentEventHandler
    ) -> None:
        """Execute locally-registered function tools for a paused run and stream the
        outputs back to the service. No-op when the run requires no action."""
        if isinstance(run.required_action, _models.SubmitToolOutputsAction):
            tool_calls = run.required_action.submit_tool_outputs.tool_calls
            if not tool_calls:
                logger.debug("No tool calls to execute.")
                return

            # We need tool set only if we are executing local function. In case if
            # the tool is azure_function we just need to wait when it will be finished.
            if any(tool_call.type == "function" for tool_call in tool_calls):
                toolset = self._toolset.get(run.assistant_id)
                if toolset:
                    tool_outputs = await toolset.execute_tool_calls(tool_calls)
                else:
                    logger.debug("Toolset is not available in the client.")
                    return

                logger.info("Tool outputs: %s", tool_outputs)
                if tool_outputs:
                    await self.submit_tool_outputs_to_stream(
                        thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
                    )

    # pylint: disable=arguments-differ
    @overload
    async def upload_file(  # pylint: disable=arguments-differ
        self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any
    ) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :keyword file_path: Required.
        :type file_path: str
        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
         "assistants_output", "batch", "batch_output", and "vision". Required.
        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # pylint: disable=arguments-differ
    @overload
    async def upload_file(  # pylint: disable=arguments-differ
        self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
    ) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :keyword file: Required.
        :paramtype file: ~azure.ai.projects._vendor.FileType
        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
         "assistants_output", "batch", "batch_output", and "vision". Required.
        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
        :keyword filename: Default value is None.
        :paramtype filename: str
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :param body: Required.
        :type body: JSON
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def upload_file(
        self,
        body: Optional[JSON] = None,
        *,
        file: Optional[FileType] = None,
        file_path: Optional[str] = None,
        purpose: Union[str, _models.FilePurpose, None] = None,
        filename: Optional[str] = None,
        **kwargs: Any,
    ) -> _models.OpenAIFile:
        """
        Uploads a file for use by other operations, delegating to the generated operations.

        :param body: JSON. Required if `file` and `purpose` are not provided.
        :type body: Optional[JSON]
        :keyword file: File content. Required if `body` and `purpose` are not provided.
        :paramtype file: Optional[FileType]
        :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided.
        :paramtype file_path: Optional[str]
        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
         "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided.
        :paramtype purpose: Union[str, _models.FilePurpose, None]
        :keyword filename: The name of the file.
        :paramtype filename: Optional[str]
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: _models.OpenAIFile
        :raises FileNotFoundError: If the file_path is invalid.
        :raises IOError: If there are issues with reading the file.
        :raises: HttpResponseError for HTTP errors.
        """
        # A raw JSON body wins over all keyword forms.
        if body is not None:
            return await super().upload_file(body=body, **kwargs)

        # Normalize the enum to its wire value before forwarding.
        if isinstance(purpose, FilePurpose):
            purpose = purpose.value

        if file is not None and purpose is not None:
            return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)

        if file_path is not None and purpose is not None:
            if not os.path.isfile(file_path):
                raise FileNotFoundError(f"The file path provided does not exist: {file_path}")

            try:
                with open(file_path, "rb") as f:
                    content = f.read()

                # Determine filename and create correct FileType
                base_filename = filename or os.path.basename(file_path)
                file_content: FileType = (base_filename, content)

                return await super().upload_file(file=file_content, purpose=purpose, **kwargs)
            except IOError as e:
                raise IOError(f"Unable to read file: {file_path}.") from e

        raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")

    @overload
    async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :param body: Required.
        :type body: JSON
        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
         is 1.
        :paramtype sleep_interval: float
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def upload_file_and_poll(
        self,
        *,
        file: FileType,
        purpose: Union[str, _models.FilePurpose],
        filename: Optional[str] = None,
        sleep_interval: float = 1,
        **kwargs: Any,
    ) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :keyword file: Required.
- :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. 
Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = await self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
- ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - time.sleep(sleep_interval) - uploaded_file = await self.get_file(uploaded_file.id) - - return uploaded_file - - @overload - async def create_vector_store_and_poll( - self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. 
- :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_and_poll( - self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store = await super().create_vector_store( - body=body, content_type=content_type or "application/json", **kwargs - ) - elif isinstance(body, io.IOBase): - vector_store = await super().create_vector_store(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = await super().create_vector_store( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - time.sleep(sleep_interval) - vector_store = await super().get_vector_store(vector_store.id) - - return vector_store - - @overload - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store 
        file batch and poll.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
         is 1.
        :paramtype sleep_interval: float
        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_vector_store_file_batch_and_poll(
        self,
        vector_store_id: str,
        *,
        file_ids: Optional[List[str]] = None,
        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
        content_type: str = "application/json",
        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
        sleep_interval: float = 1,
        **kwargs: Any,
    ) -> _models.VectorStoreFileBatch:
        """Create a vector store file batch and poll.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :keyword file_ids: List of file identifiers. Required.
        :paramtype file_ids: list[str]
        :keyword data_sources: List of Azure assets. Default value is None.
        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
         use the auto strategy. Default value is None.
        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
         is 1.
        :paramtype sleep_interval: float
        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_vector_store_file_batch_and_poll(
        self,
        vector_store_id: str,
        body: IO[bytes],
        *,
        content_type: str = "application/json",
        sleep_interval: float = 1,
        **kwargs: Any,
    ) -> _models.VectorStoreFileBatch:
        """Create a vector store file batch and poll.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
         is 1.
        :paramtype sleep_interval: float
        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_vector_store_file_batch_and_poll(
        self,
        vector_store_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        file_ids: Optional[List[str]] = None,
        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
        content_type: str = "application/json",
        sleep_interval: float = 1,
        **kwargs: Any,
    ) -> _models.VectorStoreFileBatch:
        """Create a vector store file batch and poll.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword file_ids: List of file identifiers.
Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword content_type: Body parameter content-type. Defaults to "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file_batch = await super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file_batch = await super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. 
Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = await super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file_batch = await super().get_vector_store_file_batch( - vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id - ) - - return vector_store_file_batch - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. 
- - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = await super().create_vector_store_file( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file = await super().get_vector_store_file( - vector_store_id=vector_store_id, file_id=vector_store_file.id - ) - - return vector_store_file - - @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: - """ - Asynchronously returns file content as a byte stream for the given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An async iterator that yields bytes from the file content. - :rtype: AsyncIterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. 
- """ - kwargs["stream"] = True - response = await super()._get_file_content(file_id, **kwargs) - return cast(AsyncIterator[bytes], response) - - @distributed_trace_async - async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Asynchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: str or Path - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. - """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = await self.get_file_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - # Collect all chunks asynchronously - chunks = [] - async for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - chunks.append(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - target_file_path = path / sanitized_file_name - - # Write the collected content to the file synchronously - def 
write_file(collected_chunks: list): - with open(target_file_path, "wb") as file: - for chunk in collected_chunks: - file.write(chunk) - - # Use the event loop to run the synchronous function in a thread executor - loop = asyncio.get_running_loop() - await loop.run_in_executor(None, write_file, chunks) - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - @distributed_trace_async - async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - if assistant_id in self._toolset: - del self._toolset[assistant_id] - return await super().delete_agent(assistant_id, **kwargs) - - -class _SyncCredentialWrapper(TokenCredential): - """ - The class, synchronizing AsyncTokenCredential. - - :param async_credential: The async credential to be synchronized. 
- :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential - """ - - def __init__(self, async_credential: "AsyncTokenCredential"): - self._async_credential = async_credential - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> "AccessToken": - - pool = concurrent.futures.ThreadPoolExecutor() - return pool.submit( - asyncio.run, - self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ).result() - - -__all__: List[str] = [ - "AgentsOperations", - "ConnectionsOperations", - "TelemetryOperations", - "InferenceOperations", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index aa46cc2d6073..4217e52a3cc0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -62,6 +62,8 @@ MessageDeltaTextFileCitationAnnotationObject, MessageDeltaTextFilePathAnnotation, MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextUrlCitationDetails, MessageImageFileContent, MessageImageFileDetails, MessageIncompleteDetails, @@ -72,6 +74,8 @@ MessageTextFileCitationDetails, MessageTextFilePathAnnotation, MessageTextFilePathDetails, + MessageTextUrlCitationAnnotation, + MessageTextUrlCitationDetails, MicrosoftFabricToolDefinition, OpenAIFile, OpenAIPageableListOfAgent, @@ -268,6 +272,8 @@ "MessageDeltaTextFileCitationAnnotationObject", "MessageDeltaTextFilePathAnnotation", "MessageDeltaTextFilePathAnnotationObject", + "MessageDeltaTextUrlCitationAnnotation", + 
"MessageDeltaTextUrlCitationDetails", "MessageImageFileContent", "MessageImageFileDetails", "MessageIncompleteDetails", @@ -278,6 +284,8 @@ "MessageTextFileCitationDetails", "MessageTextFilePathAnnotation", "MessageTextFilePathDetails", + "MessageTextUrlCitationAnnotation", + "MessageTextUrlCitationDetails", "MicrosoftFabricToolDefinition", "OpenAIFile", "OpenAIPageableListOfAgent", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index 6ed9eb9b3162..dd8e03f3700f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -151,6 +151,8 @@ class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Azure AI Services""" AZURE_AI_SEARCH = "CognitiveSearch" """Azure AI Search""" + API_KEY = "ApiKey" + """Generic connection that uses API Key authentication""" class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 1d4d35c02c12..372aa80c7718 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -76,42 +76,46 @@ class Agent(_model_base.Model): :vartype metadata: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. 
Required.""" - object: Literal["assistant"] = rest_field() + object: Literal["assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always assistant. Required. Default value is \"assistant\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the agent. Required.""" - description: str = rest_field() + description: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the agent. Required.""" - model: str = rest_field() + model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the model to use. Required.""" - instructions: str = rest_field() + instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The system instructions for the agent to use. Required.""" - tools: List["_models.ToolDefinition"] = rest_field() + tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The collection of tools enabled for the agent. Required.""" - tool_resources: "_models.ToolResources" = rest_field() + tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of resources that are used by the agent's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. 
Required.""" - temperature: float = rest_field() + temperature: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Required.""" - top_p: float = rest_field() + top_p: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. Required.""" - response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() + response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The response format of the tool calls used by this agent. Is one of the following types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat, ResponseFormatJsonSchemaType""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required.""" @@ -161,11 +165,11 @@ class AgentDeletionStatus(_model_base.Model): :vartype object: str """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the resource specified for deletion. 
Required.""" - deleted: bool = rest_field() + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether deletion was successful. Required.""" - object: Literal["assistant.deleted"] = rest_field() + object: Literal["assistant.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'assistant.deleted'. Required. Default value is \"assistant.deleted\".""" @@ -199,7 +203,9 @@ class AgentsApiResponseFormat(_model_base.Model): :vartype type: str or ~azure.ai.projects.models.ResponseFormat """ - type: Optional[Union[str, "_models.ResponseFormat"]] = rest_field() + type: Optional[Union[str, "_models.ResponseFormat"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\".""" @overload @@ -232,11 +238,13 @@ class AgentsNamedToolChoice(_model_base.Model): :vartype function: ~azure.ai.projects.models.FunctionName """ - type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() - """the type of tool. If type is ``function``, the function name must be set. Required. Known + type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """the type of tool. If type is ``function``\ , the function name must be set. Required. 
Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", \"fabric_aiskill\", \"sharepoint_grounding\", and \"azure_ai_search\".""" - function: Optional["_models.FunctionName"] = rest_field() + function: Optional["_models.FunctionName"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function to call.""" @overload @@ -283,19 +291,21 @@ class AgentThread(_model_base.Model): :vartype metadata: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread"] = rest_field() + object: Literal["thread"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'thread'. Required. Default value is \"thread\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - tool_resources: "_models.ToolResources" = rest_field() + tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of resources that are made available to the agent's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Required.""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. 
Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required.""" @@ -329,8 +339,8 @@ class AgentThreadCreationOptions(_model_base.Model): :vartype messages: list[~azure.ai.projects.models.ThreadMessageOptions] :ivar tool_resources: A set of resources that are made available to the agent's tools in this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list of vector store IDs. :vartype tool_resources: ~azure.ai.projects.models.ToolResources :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for @@ -339,15 +349,19 @@ class AgentThreadCreationOptions(_model_base.Model): :vartype metadata: dict[str, str] """ - messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The initial messages to associate with the new thread.""" - tool_resources: Optional["_models.ToolResources"] = rest_field() + tool_resources: Optional["_models.ToolResources"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A set of resources that are made available to the agent's tools in this thread. The resources are specific to the type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs.""" - metadata: Optional[Dict[str, str]] = rest_field() + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length.""" @@ -380,7 +394,9 @@ class AppInsightsProperties(_model_base.Model): :vartype connection_string: str """ - connection_string: str = rest_field(name="ConnectionString") + connection_string: str = rest_field( + name="ConnectionString", visibility=["read", "create", "update", "delete", "query"] + ) """Authentication type of the connection target. Required.""" @overload @@ -413,7 +429,7 @@ class InputData(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """Type of the data. Required. Default value is None.""" @overload @@ -455,13 +471,17 @@ class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"app_insights\".""" - resource_id: str = rest_field(name="resourceId") + resource_id: str = rest_field(name="resourceId", visibility=["read", "create", "update", "delete", "query"]) """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" - query: str = rest_field() + query: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Query to fetch the data. 
Required.""" - service_name: Optional[str] = rest_field(name="serviceName") + service_name: Optional[str] = rest_field( + name="serviceName", visibility=["read", "create", "update", "delete", "query"] + ) """Service name.""" - connection_string: Optional[str] = rest_field(name="connectionString") + connection_string: Optional[str] = rest_field( + name="connectionString", visibility=["read", "create", "update", "delete", "query"] + ) """Connection String to connect to ApplicationInsights.""" @overload @@ -493,7 +513,9 @@ class AzureAISearchResource(_model_base.Model): :vartype index_list: list[~azure.ai.projects.models.IndexResource] """ - index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") + index_list: Optional[List["_models.IndexResource"]] = rest_field( + name="indexes", visibility=["read", "create", "update", "delete", "query"] + ) """The indices attached to this agent. There can be a maximum of 1 index resource attached to the agent.""" @@ -529,7 +551,7 @@ class ToolDefinition(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" @overload @@ -559,7 +581,7 @@ class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search :vartype type: str """ - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'azure_ai_search'. Required. 
Default value is \"azure_ai_search\".""" @@ -592,10 +614,12 @@ class AzureFunctionBinding(_model_base.Model): :vartype storage_queue: ~azure.ai.projects.models.AzureFunctionStorageQueue """ - type: Literal["storage_queue"] = rest_field() + type: Literal["storage_queue"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of binding, which is always 'storage_queue'. Required. Default value is \"storage_queue\".""" - storage_queue: "_models.AzureFunctionStorageQueue" = rest_field() + storage_queue: "_models.AzureFunctionStorageQueue" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Storage queue. Required.""" @overload @@ -631,12 +655,16 @@ class AzureFunctionDefinition(_model_base.Model): :vartype output_binding: ~azure.ai.projects.models.AzureFunctionBinding """ - function: "_models.FunctionDefinition" = rest_field() + function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The definition of azure function and its parameters. Required.""" - input_binding: "_models.AzureFunctionBinding" = rest_field() + input_binding: "_models.AzureFunctionBinding" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Input storage queue. The queue storage trigger runs a function as messages are added to it. Required.""" - output_binding: "_models.AzureFunctionBinding" = rest_field() + output_binding: "_models.AzureFunctionBinding" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Output storage queue. The function writes output to this queue when the input items are processed. 
Required.""" @@ -671,9 +699,11 @@ class AzureFunctionStorageQueue(_model_base.Model): :vartype queue_name: str """ - storage_service_endpoint: str = rest_field(name="queue_service_endpoint") + storage_service_endpoint: str = rest_field( + name="queue_service_endpoint", visibility=["read", "create", "update", "delete", "query"] + ) """URI to the Azure Storage Queue service allowing you to manipulate a queue. Required.""" - queue_name: str = rest_field() + queue_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of an Azure function storage queue. Required.""" @overload @@ -707,10 +737,12 @@ class AzureFunctionToolDefinition(ToolDefinition, discriminator="azure_function" :vartype azure_function: ~azure.ai.projects.models.AzureFunctionDefinition """ - type: Literal["azure_function"] = rest_discriminator(name="type") # type: ignore + type: Literal["azure_function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'azure_function'. Required. Default value is \"azure_function\".""" - azure_function: "_models.AzureFunctionDefinition" = rest_field() + azure_function: "_models.AzureFunctionDefinition" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The definition of the concrete function that the function tool should call. Required.""" @overload @@ -743,10 +775,12 @@ class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding" :vartype bing_grounding: ~azure.ai.projects.models.ToolConnectionList """ - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'bing_grounding'. Required. 
Default value is \"bing_grounding\".""" - bing_grounding: "_models.ToolConnectionList" = rest_field() + bing_grounding: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The list of connections used by the bing grounding tool. Required.""" @overload @@ -776,7 +810,7 @@ class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpre :vartype type: str """ - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'code_interpreter'. Required. Default value is \"code_interpreter\".""" @@ -808,11 +842,13 @@ class CodeInterpreterToolResource(_model_base.Model): :vartype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] """ - file_ids: Optional[List[str]] = rest_field() + file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.""" - data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field() + data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The data sources to be used. This option is mutually exclusive with the ``fileIds`` property.""" @overload @@ -842,7 +878,7 @@ class CredentialsApiKeyAuth(_model_base.Model): :vartype key: str """ - key: str = rest_field() + key: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The API key. 
Required.""" @overload @@ -871,7 +907,7 @@ class CredentialsSASAuth(_model_base.Model): :vartype sas: str """ - sas: str = rest_field(name="SAS") + sas: str = rest_field(name="SAS", visibility=["read", "create", "update", "delete", "query"]) """The Shared Access Signatures (SAS) token. Required.""" @overload @@ -904,7 +940,7 @@ class Trigger(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """Type of the trigger. Required. Default value is None.""" @overload @@ -939,7 +975,7 @@ class CronTrigger(Trigger, discriminator="Cron"): type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"Cron\".""" - expression: str = rest_field() + expression: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Cron expression for the trigger. Required.""" @overload @@ -974,7 +1010,7 @@ class Dataset(InputData, discriminator="dataset"): type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"dataset\".""" - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Evaluation input data. Required.""" @overload @@ -1028,17 +1064,19 @@ class Evaluation(_model_base.Model): """Identifier of the evaluation. Required.""" data: "_models.InputData" = rest_field(visibility=["read", "create"]) """Data for evaluation. Required.""" - display_name: Optional[str] = rest_field(name="displayName") + display_name: Optional[str] = rest_field( + name="displayName", visibility=["read", "create", "update", "delete", "query"] + ) """Display Name for evaluation. It helps to find the evaluation easily in AI Foundry. 
It does not need to be unique.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Description of the evaluation. It can be used to store additional information about the evaluation and is mutable.""" system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) """Metadata containing createdBy and modifiedBy information.""" status: Optional[str] = rest_field(visibility=["read"]) """Status of the evaluation. It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Evaluation's tags. Unlike properties, tags are fully mutable.""" properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be @@ -1105,14 +1143,14 @@ class EvaluationSchedule(_model_base.Model): """Name of the schedule, which also serves as the unique identifier for the evaluation. Required.""" data: "_models.ApplicationInsightsConfiguration" = rest_field(visibility=["read", "create"]) """Data for evaluation. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Description of the evaluation. It can be used to store additional information about the evaluation and is mutable.""" system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) """Metadata containing createdBy and modifiedBy information.""" provisioning_state: Optional[str] = rest_field(name="provisioningState", visibility=["read"]) """Provisioning State of the evaluation. 
It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() + tags: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Evaluation's tags. Unlike properties, tags are fully mutable.""" properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be @@ -1121,7 +1159,7 @@ class EvaluationSchedule(_model_base.Model): """Enabled status of the evaluation. It is set by service and is read-only.""" evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) """Evaluators to be used for the evaluation. Required.""" - trigger: "_models.Trigger" = rest_field() + trigger: "_models.Trigger" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Trigger for the evaluation. Required.""" @overload @@ -1159,11 +1197,15 @@ class EvaluatorConfiguration(_model_base.Model): :vartype data_mapping: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Identifier of the evaluator. Required.""" - init_params: Optional[Dict[str, Any]] = rest_field(name="initParams") + init_params: Optional[Dict[str, Any]] = rest_field( + name="initParams", visibility=["read", "create", "update", "delete", "query"] + ) """Initialization parameters of the evaluator.""" - data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping") + data_mapping: Optional[Dict[str, str]] = rest_field( + name="dataMapping", visibility=["read", "create", "update", "delete", "query"] + ) """Data parameters of the evaluator.""" @overload @@ -1200,11 +1242,11 @@ class FileDeletionStatus(_model_base.Model): :vartype object: str """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the resource specified for deletion. 
Required.""" - deleted: bool = rest_field() + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether deletion was successful. Required.""" - object: Literal["file"] = rest_field() + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'file'. Required. Default value is \"file\".""" @overload @@ -1239,9 +1281,9 @@ class FileListResponse(_model_base.Model): :vartype data: list[~azure.ai.projects.models.OpenAIFile] """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'list'. Required. Default value is \"list\".""" - data: List["_models.OpenAIFile"] = rest_field() + data: List["_models.OpenAIFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The files returned for the request. Required.""" @overload @@ -1273,9 +1315,9 @@ class FileSearchRankingOptions(_model_base.Model): :vartype score_threshold: float """ - ranker: str = rest_field() + ranker: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """File search ranker. Required.""" - score_threshold: float = rest_field() + score_threshold: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Ranker search threshold. Required.""" @overload @@ -1309,9 +1351,9 @@ class FileSearchToolCallContent(_model_base.Model): :vartype text: str """ - type: Literal["text"] = rest_field() + type: Literal["text"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of the content. Required. Default value is \"text\".""" - text: str = rest_field() + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text content of the file. 
Required.""" @overload @@ -1344,9 +1386,11 @@ class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): :vartype file_search: ~azure.ai.projects.models.FileSearchToolDefinitionDetails """ - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" - file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Options overrides for the file search tool.""" @overload @@ -1381,13 +1425,15 @@ class FileSearchToolDefinitionDetails(_model_base.Model): :vartype ranking_options: ~azure.ai.projects.models.FileSearchRankingOptions """ - max_num_results: Optional[int] = rest_field() + max_num_results: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than ``max_num_results`` results. 
See the file search tool documentation for more information.""" - ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field() + ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Ranking options for file search.""" @overload @@ -1422,10 +1468,12 @@ class FileSearchToolResource(_model_base.Model): :vartype vector_stores: list[~azure.ai.projects.models.VectorStoreConfigurations] """ - vector_store_ids: Optional[List[str]] = rest_field() + vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" - vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field() + vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The list of vector store configuration objects from Azure. This list is limited to one element. The only element of this list contains the list of azure asset IDs used by the search tool.""" @@ -1463,12 +1511,12 @@ class FunctionDefinition(_model_base.Model): :vartype parameters: any """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function to be called. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A description of what the function does, used by the model to choose when and how to call the function.""" - parameters: Any = rest_field() + parameters: Any = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The parameters the functions accepts, described as a JSON Schema object. 
Required.""" @overload @@ -1499,7 +1547,7 @@ class FunctionName(_model_base.Model): :vartype name: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function to call. Required.""" @overload @@ -1531,9 +1579,9 @@ class FunctionToolDefinition(ToolDefinition, discriminator="function"): :vartype function: ~azure.ai.projects.models.FunctionDefinition """ - type: Literal["function"] = rest_discriminator(name="type") # type: ignore + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'function'. Required. Default value is \"function\".""" - function: "_models.FunctionDefinition" = rest_field() + function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The definition of the concrete function that the function tool should call. Required.""" @overload @@ -1566,11 +1614,13 @@ class GetAppInsightsResponse(_model_base.Model): :vartype properties: ~azure.ai.projects.models._models.AppInsightsProperties """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A unique identifier for the resource. Required.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the resource. Required.""" - properties: "_models._models.AppInsightsProperties" = rest_field() + properties: "_models._models.AppInsightsProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The properties of the resource. 
Required.""" @overload @@ -1605,11 +1655,13 @@ class GetConnectionResponse(_model_base.Model): :vartype properties: ~azure.ai.projects.models._models.InternalConnectionProperties """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A unique identifier for the connection. Required.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the resource. Required.""" - properties: "_models._models.InternalConnectionProperties" = rest_field() + properties: "_models._models.InternalConnectionProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The properties of the resource. Required.""" @overload @@ -1644,11 +1696,13 @@ class GetWorkspaceResponse(_model_base.Model): :vartype properties: ~azure.ai.projects.models._models.WorkspaceProperties """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A unique identifier for the resource. Required.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the resource. Required.""" - properties: "_models._models.WorkspaceProperties" = rest_field() + properties: "_models._models.WorkspaceProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The properties of the resource. Required.""" @overload @@ -1681,7 +1735,9 @@ class IncompleteRunDetails(_model_base.Model): :vartype reason: str or ~azure.ai.projects.models.IncompleteDetailsReason """ - reason: Union[str, "_models.IncompleteDetailsReason"] = rest_field() + reason: Union[str, "_models.IncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The reason why the run is incomplete. This indicates which specific token limit was reached during the run. Required. 
Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\".""" @@ -1715,9 +1771,9 @@ class IndexResource(_model_base.Model): :vartype index_name: str """ - index_connection_id: str = rest_field() + index_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An index connection id in an IndexResource attached to this agent. Required.""" - index_name: str = rest_field() + index_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of an index in an IndexResource attached to this agent. Required.""" @overload @@ -1751,20 +1807,22 @@ class InternalConnectionProperties(_model_base.Model): "ApiKey", "AAD", "SAS", and "None". :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str """ __mapping__: Dict[str, _model_base.Model] = {} - auth_type: str = rest_discriminator(name="authType") + auth_type: str = rest_discriminator(name="authType", visibility=["read", "create", "update", "delete", "query"]) """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", \"SAS\", and \"None\".""" - category: Union[str, "_models.ConnectionType"] = rest_field() + category: Union[str, "_models.ConnectionType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Category of the connection. Required. 
Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" - target: str = rest_field() + \"AzureBlob\", \"AIServices\", \"CognitiveSearch\", and \"ApiKey\".""" + target: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The connection URL to be used for this service. Required.""" @overload @@ -1793,7 +1851,7 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str @@ -1802,7 +1860,7 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi :vartype auth_type: str or ~azure.ai.projects.models.ENTRA_ID """ - auth_type: Literal[AuthenticationType.ENTRA_ID] = rest_discriminator(name="authType") # type: ignore + auth_type: Literal[AuthenticationType.ENTRA_ID] = rest_discriminator(name="authType", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Authentication type of the connection target. Required. Entra ID authentication (formerly known as AAD)""" @@ -1830,7 +1888,7 @@ class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discr :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. 
:vartype target: str @@ -1840,9 +1898,11 @@ class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discr :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth """ - auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore + auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Authentication type of the connection target. Required. API Key authentication""" - credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() + credentials: "_models._models.CredentialsApiKeyAuth" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Credentials will only be present for authType=ApiKey. Required.""" @overload @@ -1870,7 +1930,7 @@ class InternalConnectionPropertiesNoAuth(InternalConnectionProperties, discrimin :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str @@ -1878,7 +1938,7 @@ class InternalConnectionPropertiesNoAuth(InternalConnectionProperties, discrimin :vartype auth_type: str or ~azure.ai.projects.models.NONE """ - auth_type: Literal[AuthenticationType.NONE] = rest_discriminator(name="authType") # type: ignore + auth_type: Literal[AuthenticationType.NONE] = rest_discriminator(name="authType", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Authentication type of the connection target. Required. No authentication""" @overload @@ -1905,7 +1965,7 @@ class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discrimi :ivar category: Category of the connection. 
Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str @@ -1916,10 +1976,12 @@ class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discrimi :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth """ - auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore + auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" - credentials: "_models._models.CredentialsSASAuth" = rest_field() + credentials: "_models._models.CredentialsSASAuth" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Credentials will only be present for authType=ApiKey. Required.""" @overload @@ -1950,7 +2012,9 @@ class ListConnectionsResponse(_model_base.Model): :vartype value: list[~azure.ai.projects.models._models.GetConnectionResponse] """ - value: List["_models._models.GetConnectionResponse"] = rest_field() + value: List["_models._models.GetConnectionResponse"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of connection list secrets. 
Required.""" @overload @@ -1984,11 +2048,15 @@ class MessageAttachment(_model_base.Model): ~azure.ai.projects.models.FileSearchToolDefinition] """ - file_id: Optional[str] = rest_field() + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the file to attach to the message.""" - data_source: Optional["_models.VectorStoreDataSource"] = rest_field() + data_source: Optional["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Azure asset ID.""" - tools: List["_types.MessageAttachmentToolDefinition"] = rest_field() + tools: List["_types.MessageAttachmentToolDefinition"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The tools to add to this file. Required.""" @overload @@ -2023,7 +2091,7 @@ class MessageContent(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" @overload @@ -2055,9 +2123,11 @@ class MessageDelta(_model_base.Model): :vartype content: list[~azure.ai.projects.models.MessageDeltaContent] """ - role: Union[str, "_models.MessageRole"] = rest_field() + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" - content: List["_models.MessageDeltaContent"] = rest_field() + content: List["_models.MessageDeltaContent"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The content of the message as an array of text and/or images. 
Required.""" @overload @@ -2094,12 +2164,12 @@ class MessageDeltaChunk(_model_base.Model): :vartype delta: ~azure.ai.projects.models.MessageDelta """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier of the message, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message.delta"] = rest_field() + object: Literal["thread.message.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always ``thread.message.delta``. Required. Default value is \"thread.message.delta\".""" - delta: "_models.MessageDelta" = rest_field() + delta: "_models.MessageDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The delta containing the fields that have changed on the Message. Required.""" @overload @@ -2136,9 +2206,9 @@ class MessageDeltaContent(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The index of the content part of the message. Required.""" - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of content for this content part. Required. Default value is None.""" @overload @@ -2173,10 +2243,12 @@ class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_fil :vartype image_file: ~azure.ai.projects.models.MessageDeltaImageFileContentObject """ - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of content for this content part, which is always \"image_file.\". Required. 
Default value is \"image_file\".""" - image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field() + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The image_file data.""" @overload @@ -2205,7 +2277,7 @@ class MessageDeltaImageFileContentObject(_model_base.Model): :vartype file_id: str """ - file_id: Optional[str] = rest_field() + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The file ID of the image in the message content.""" @overload @@ -2230,7 +2302,8 @@ class MessageDeltaTextAnnotation(_model_base.Model): """The abstract base representation of a streamed text content part's text annotation. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation + MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation, + MessageDeltaTextUrlCitationAnnotation :ivar index: The index of the annotation within a text content part. Required. @@ -2240,9 +2313,9 @@ class MessageDeltaTextAnnotation(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The index of the annotation within a text content part. Required.""" - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of the text content annotation. Required. 
Default value is None.""" @overload @@ -2277,10 +2350,12 @@ class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"): :vartype text: ~azure.ai.projects.models.MessageDeltaTextContentObject """ - type: Literal["text"] = rest_discriminator(name="type") # type: ignore + type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of content for this content part, which is always \"text.\". Required. Default value is \"text\".""" - text: Optional["_models.MessageDeltaTextContentObject"] = rest_field() + text: Optional["_models.MessageDeltaTextContentObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The text content details.""" @overload @@ -2311,9 +2386,11 @@ class MessageDeltaTextContentObject(_model_base.Model): :vartype annotations: list[~azure.ai.projects.models.MessageDeltaTextAnnotation] """ - value: Optional[str] = rest_field() + value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The data that makes up the text.""" - annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field() + annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Annotations for the text.""" @overload @@ -2354,16 +2431,18 @@ class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discrim :vartype end_index: int """ - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the text content annotation, which is always \"file_citation.\". Required. 
Default value is \"file_citation\".""" - file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field() + file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The file citation information.""" - text: Optional[str] = rest_field() + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text in the message content that needs to be replaced.""" - start_index: Optional[int] = rest_field() + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The end index of this annotation in the content text.""" @overload @@ -2397,9 +2476,9 @@ class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint :vartype quote: str """ - file_id: Optional[str] = rest_field() + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the specific file the citation is from.""" - quote: Optional[str] = rest_field() + quote: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The specific quote in the cited file.""" @overload @@ -2440,16 +2519,18 @@ class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminat :vartype text: str """ - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the text content annotation, which is always \"file_path.\". Required. 
Default value is \"file_path\".""" - file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field() + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The file path information.""" - start_index: Optional[int] = rest_field() + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The end index of this annotation in the content text.""" - text: Optional[str] = rest_field() + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text in the message content that needs to be replaced.""" @overload @@ -2482,7 +2563,7 @@ class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): :vartype file_id: str """ - file_id: Optional[str] = rest_field() + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The file ID for the annotation.""" @overload @@ -2503,6 +2584,90 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class MessageDeltaTextUrlCitationAnnotation(MessageDeltaTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. 
+ :vartype url_citation: ~azure.ai.projects.models.MessageDeltaTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageDeltaTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + index: int, + url_citation: "_models.MessageDeltaTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageDeltaTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + + :ivar url: The URL associated with this citation. Required. + :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. 
Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class MessageImageFileContent(MessageContent, discriminator="image_file"): """A representation of image file content in a thread message. @@ -2514,9 +2679,11 @@ class MessageImageFileContent(MessageContent, discriminator="image_file"): :vartype image_file: ~azure.ai.projects.models.MessageImageFileDetails """ - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" - image_file: "_models.MessageImageFileDetails" = rest_field() + image_file: "_models.MessageImageFileDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The image file for this thread message content item. Required.""" @overload @@ -2545,7 +2712,7 @@ class MessageImageFileDetails(_model_base.Model): :vartype file_id: str """ - file_id: str = rest_field() + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID for the file associated with this image. 
Required.""" @overload @@ -2576,7 +2743,9 @@ class MessageIncompleteDetails(_model_base.Model): :vartype reason: str or ~azure.ai.projects.models.MessageIncompleteDetailsReason """ - reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The provided reason describing why the message was marked as incomplete. Required. Known values are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and \"run_expired\".""" @@ -2603,7 +2772,8 @@ class MessageTextAnnotation(_model_base.Model): """An abstract representation of an annotation to text thread message content. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation, + MessageTextUrlCitationAnnotation :ivar type: The object type. Required. Default value is None. @@ -2613,9 +2783,9 @@ class MessageTextAnnotation(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" - text: str = rest_field() + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The textual content associated with this text annotation item. 
Required.""" @overload @@ -2647,9 +2817,9 @@ class MessageTextContent(MessageContent, discriminator="text"): :vartype text: ~azure.ai.projects.models.MessageTextDetails """ - type: Literal["text"] = rest_discriminator(name="type") # type: ignore + type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'text'. Required. Default value is \"text\".""" - text: "_models.MessageTextDetails" = rest_field() + text: "_models.MessageTextDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text and associated annotations for this thread message content item. Required.""" @overload @@ -2680,9 +2850,11 @@ class MessageTextDetails(_model_base.Model): :vartype annotations: list[~azure.ai.projects.models.MessageTextAnnotation] """ - value: str = rest_field() + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text data. Required.""" - annotations: List["_models.MessageTextAnnotation"] = rest_field() + annotations: List["_models.MessageTextAnnotation"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of annotations associated with this text. Required.""" @overload @@ -2725,14 +2897,16 @@ class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="fi :vartype end_index: int """ - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'file_citation'. Required. 
Default value is \"file_citation\".""" - file_citation: "_models.MessageTextFileCitationDetails" = rest_field() + file_citation: "_models.MessageTextFileCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A citation within the message that points to a specific quote from a specific file. Generated when the agent uses the \"file_search\" tool to search files. Required.""" - start_index: Optional[int] = rest_field() + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last text index associated with this text annotation.""" @overload @@ -2767,9 +2941,9 @@ class MessageTextFileCitationDetails(_model_base.Model): :vartype quote: str """ - file_id: str = rest_field() + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the file associated with this citation. Required.""" - quote: str = rest_field() + quote: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The specific quote cited in the associated file. Required.""" @overload @@ -2809,14 +2983,16 @@ class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_p :vartype end_index: int """ - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'file_path'. Required. 
Default value is \"file_path\".""" - file_path: "_models.MessageTextFilePathDetails" = rest_field() + file_path: "_models.MessageTextFilePathDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A URL for the file that's generated when the agent used the code_interpreter tool to generate a file. Required.""" - start_index: Optional[int] = rest_field() + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last text index associated with this text annotation.""" @overload @@ -2848,7 +3024,7 @@ class MessageTextFilePathDetails(_model_base.Model): :vartype file_id: str """ - file_id: str = rest_field() + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the specific file that the citation is from. Required.""" @overload @@ -2869,6 +3045,90 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class MessageTextUrlCitationAnnotation(MessageTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. + :vartype url_citation: ~azure.ai.projects.models.MessageTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. 
+ :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + url_citation: "_models.MessageTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + + :ivar url: The URL associated with this citation. Required. + :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. 
Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_aiskill"): """The input definition information for a Microsoft Fabric tool as used to configure an agent. @@ -2880,10 +3140,12 @@ class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_aiskil :vartype fabric_aiskill: ~azure.ai.projects.models.ToolConnectionList """ - type: Literal["fabric_aiskill"] = rest_discriminator(name="type") # type: ignore + type: Literal["fabric_aiskill"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'fabric_aiskill'. Required. Default value is \"fabric_aiskill\".""" - fabric_aiskill: "_models.ToolConnectionList" = rest_field() + fabric_aiskill: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The list of connections used by the Microsoft Fabric tool. Required.""" @overload @@ -2932,25 +3194,29 @@ class OpenAIFile(_model_base.Model): :vartype status_details: str """ - object: Literal["file"] = rest_field() + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'file'. Required. Default value is \"file\".""" - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. 
Required.""" - bytes: int = rest_field() + bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The size of the file, in bytes. Required.""" - filename: str = rest_field() + filename: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the file. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - purpose: Union[str, "_models.FilePurpose"] = rest_field() + purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The intended purpose of a file. Required. Known values are: \"fine-tune\", \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and \"vision\".""" - status: Optional[Union[str, "_models.FileState"]] = rest_field() + status: Optional[Union[str, "_models.FileState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The state of the file. This field is available in Azure OpenAI only. Known values are: \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and \"deleted\".""" - status_details: Optional[str] = rest_field() + status_details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The error message with details in case processing of this file failed. This field is available in Azure OpenAI only.""" @@ -2998,15 +3264,15 @@ class OpenAIPageableListOfAgent(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. 
Default value is \"list\".""" - data: List["_models.Agent"] = rest_field() + data: List["_models.Agent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. Required.""" @@ -3051,15 +3317,15 @@ class OpenAIPageableListOfRunStep(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.RunStep"] = rest_field() + data: List["_models.RunStep"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. 
Required.""" @@ -3104,15 +3370,15 @@ class OpenAIPageableListOfThreadMessage(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadMessage"] = rest_field() + data: List["_models.ThreadMessage"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. Required.""" @@ -3157,15 +3423,15 @@ class OpenAIPageableListOfThreadRun(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadRun"] = rest_field() + data: List["_models.ThreadRun"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. 
Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. Required.""" @@ -3210,15 +3476,15 @@ class OpenAIPageableListOfVectorStore(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStore"] = rest_field() + data: List["_models.VectorStore"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. Required.""" @@ -3263,15 +3529,15 @@ class OpenAIPageableListOfVectorStoreFile(_model_base.Model): :vartype has_more: bool """ - object: Literal["list"] = rest_field() + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStoreFile"] = rest_field() + data: List["_models.VectorStoreFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. 
Required.""" - first_id: str = rest_field() + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" - last_id: str = rest_field() + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last ID represented in this list. Required.""" - has_more: bool = rest_field() + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether there are additional values available not captured in this list. Required.""" @@ -3310,7 +3576,7 @@ class OpenApiAuthDetails(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of authentication, must be anonymous/connection/managed_identity. Required. Known values are: \"anonymous\", \"connection\", and \"managed_identity\".""" @@ -3340,7 +3606,7 @@ class OpenApiAnonymousAuthDetails(OpenApiAuthDetails, discriminator="anonymous") :vartype type: str or ~azure.ai.projects.models.ANONYMOUS """ - type: Literal[OpenApiAuthType.ANONYMOUS] = rest_discriminator(name="type") # type: ignore + type: Literal[OpenApiAuthType.ANONYMOUS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'anonymous'. Required.""" @overload @@ -3369,9 +3635,11 @@ class OpenApiConnectionAuthDetails(OpenApiAuthDetails, discriminator="connection :vartype security_scheme: ~azure.ai.projects.models.OpenApiConnectionSecurityScheme """ - type: Literal[OpenApiAuthType.CONNECTION] = rest_discriminator(name="type") # type: ignore + type: Literal[OpenApiAuthType.CONNECTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'connection'. 
Required.""" - security_scheme: "_models.OpenApiConnectionSecurityScheme" = rest_field() + security_scheme: "_models.OpenApiConnectionSecurityScheme" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Connection auth security details. Required.""" @overload @@ -3400,7 +3668,7 @@ class OpenApiConnectionSecurityScheme(_model_base.Model): :vartype connection_id: str """ - connection_id: str = rest_field() + connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Connection id for Connection auth type. Required.""" @overload @@ -3436,14 +3704,14 @@ class OpenApiFunctionDefinition(_model_base.Model): :vartype auth: ~azure.ai.projects.models.OpenApiAuthDetails """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function to be called. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A description of what the function does, used by the model to choose when and how to call the function.""" - spec: Any = rest_field() + spec: Any = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The openapi function shape, described as a JSON Schema object. Required.""" - auth: "_models.OpenApiAuthDetails" = rest_field() + auth: "_models.OpenApiAuthDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Open API authentication details. 
Required.""" @overload @@ -3477,9 +3745,11 @@ class OpenApiManagedAuthDetails(OpenApiAuthDetails, discriminator="managed_ident :vartype security_scheme: ~azure.ai.projects.models.OpenApiManagedSecurityScheme """ - type: Literal[OpenApiAuthType.MANAGED_IDENTITY] = rest_discriminator(name="type") # type: ignore + type: Literal[OpenApiAuthType.MANAGED_IDENTITY] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'managed_identity'. Required.""" - security_scheme: "_models.OpenApiManagedSecurityScheme" = rest_field() + security_scheme: "_models.OpenApiManagedSecurityScheme" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Connection auth security details. Required.""" @overload @@ -3508,7 +3778,7 @@ class OpenApiManagedSecurityScheme(_model_base.Model): :vartype audience: str """ - audience: str = rest_field() + audience: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Authentication scope for managed_identity auth type. Required.""" @overload @@ -3539,9 +3809,11 @@ class OpenApiToolDefinition(ToolDefinition, discriminator="openapi"): :vartype openapi: ~azure.ai.projects.models.OpenApiFunctionDefinition """ - type: Literal["openapi"] = rest_discriminator(name="type") # type: ignore + type: Literal["openapi"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'openapi'. Required. Default value is \"openapi\".""" - openapi: "_models.OpenApiFunctionDefinition" = rest_field() + openapi: "_models.OpenApiFunctionDefinition" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The openapi function definition. 
Required.""" @overload @@ -3576,13 +3848,17 @@ class RecurrenceSchedule(_model_base.Model): :vartype month_days: list[int] """ - hours: List[int] = rest_field() + hours: List[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of hours for the schedule. Required.""" - minutes: List[int] = rest_field() + minutes: List[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of minutes for the schedule. Required.""" - week_days: Optional[List[Union[str, "_models.WeekDays"]]] = rest_field(name="weekDays") + week_days: Optional[List[Union[str, "_models.WeekDays"]]] = rest_field( + name="weekDays", visibility=["read", "create", "update", "delete", "query"] + ) """List of days for the schedule.""" - month_days: Optional[List[int]] = rest_field(name="monthDays") + month_days: Optional[List[int]] = rest_field( + name="monthDays", visibility=["read", "create", "update", "delete", "query"] + ) """List of month days for the schedule.""" @overload @@ -3625,12 +3901,14 @@ class RecurrenceTrigger(Trigger, discriminator="Recurrence"): type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"Recurrence\".""" - frequency: Union[str, "_models.Frequency"] = rest_field() + frequency: Union[str, "_models.Frequency"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", \"Hour\", and \"Minute\".""" - interval: int = rest_field() + interval: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Specifies schedule interval in conjunction with frequency. 
Required.""" - schedule: Optional["_models.RecurrenceSchedule"] = rest_field() + schedule: Optional["_models.RecurrenceSchedule"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The recurrence schedule.""" @overload @@ -3665,7 +3943,7 @@ class RequiredAction(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" @overload @@ -3701,9 +3979,9 @@ class RequiredToolCall(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type for the required tool call. Required. Default value is None.""" - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required.""" @overload @@ -3741,10 +4019,12 @@ class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"): :vartype function: ~azure.ai.projects.models.RequiredFunctionToolCallDetails """ - type: Literal["function"] = rest_discriminator(name="type") # type: ignore + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type of the required tool call. Always 'function' for function tools. Required. Default value is \"function\".""" - function: "_models.RequiredFunctionToolCallDetails" = rest_field() + function: "_models.RequiredFunctionToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Detailed information about the function to be executed by the tool that includes name and arguments. 
Required.""" @@ -3780,9 +4060,9 @@ class RequiredFunctionToolCallDetails(_model_base.Model): :vartype arguments: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function. Required.""" - arguments: str = rest_field() + arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The arguments to use when invoking the named function, as provided by the model. Arguments are presented as a JSON document that should be validated and parsed for evaluation. Required.""" @@ -3819,12 +4099,12 @@ class ResponseFormatJsonSchema(_model_base.Model): :vartype schema: any """ - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A description of what the response format is for, used by the model to determine how to respond in the format.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of a schema. Required.""" - schema: Any = rest_field() + schema: Any = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The JSON schema object, describing the response format. Required.""" @overload @@ -3859,9 +4139,11 @@ class ResponseFormatJsonSchemaType(_model_base.Model): :vartype json_schema: ~azure.ai.projects.models.ResponseFormatJsonSchema """ - type: Literal["json_schema"] = rest_field() + type: Literal["json_schema"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Type. Required. Default value is \"json_schema\".""" - json_schema: "_models.ResponseFormatJsonSchema" = rest_field() + json_schema: "_models.ResponseFormatJsonSchema" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The JSON schema, describing response format. 
Required.""" @overload @@ -3896,11 +4178,11 @@ class RunCompletionUsage(_model_base.Model): :vartype total_tokens: int """ - completion_tokens: int = rest_field() + completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of completion tokens used over the course of the run. Required.""" - prompt_tokens: int = rest_field() + prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of prompt tokens used over the course of the run. Required.""" - total_tokens: int = rest_field() + total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Total number of tokens used (prompt + completion). Required.""" @overload @@ -3933,9 +4215,9 @@ class RunError(_model_base.Model): :vartype message: str """ - code: str = rest_field() + code: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The status for the error. Required.""" - message: str = rest_field() + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The human-readable text associated with the error. Required.""" @overload @@ -3971,8 +4253,8 @@ class RunStep(_model_base.Model): :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. Known values are: "message_creation" and "tool_calls". :vartype type: str or ~azure.ai.projects.models.RunStepType - :ivar assistant_id: The ID of the agent associated with the run step. Required. - :vartype assistant_id: str + :ivar agent_id: The ID of the agent associated with the run step. Required. + :vartype agent_id: str :ivar thread_id: The ID of the thread that was run. Required. :vartype thread_id: str :ivar run_id: The ID of the run that this run step is a part of. Required. 
@@ -4007,41 +4289,53 @@ class RunStep(_model_base.Model): :vartype metadata: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run.step"] = rest_field() + object: Literal["thread.run.step"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'thread.run.step'. Required. Default value is \"thread.run.step\".""" - type: Union[str, "_models.RunStepType"] = rest_field() + type: Union[str, "_models.RunStepType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of run step, which can be either message_creation or tool_calls. Required. Known values are: \"message_creation\" and \"tool_calls\".""" - assistant_id: str = rest_field() + agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) """The ID of the agent associated with the run step. Required.""" - thread_id: str = rest_field() + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the thread that was run. Required.""" - run_id: str = rest_field() + run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the run that this run step is a part of. Required.""" - status: Union[str, "_models.RunStepStatus"] = rest_field() + status: Union[str, "_models.RunStepStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", \"failed\", \"completed\", and \"expired\".""" - step_details: "_models.RunStepDetails" = rest_field() + step_details: "_models.RunStepDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The details for this run step. 
Required.""" - last_error: "_models.RunStepError" = rest_field() + last_error: "_models.RunStepError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """If applicable, information about the last error encountered by this run step. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - expired_at: datetime.datetime = rest_field(format="unix-timestamp") + expired_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this item expired. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this completed. Required.""" - cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + cancelled_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" - failed_at: datetime.datetime = rest_field(format="unix-timestamp") + failed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this failed. Required.""" - usage: Optional["_models.RunStepCompletionUsage"] = rest_field() + usage: Optional["_models.RunStepCompletionUsage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Usage statistics related to the run step. 
This value will be ``null`` while the run step's status is ``in_progress``.""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required.""" @@ -4052,7 +4346,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin type: Union[str, "_models.RunStepType"], - assistant_id: str, + agent_id: str, thread_id: str, run_id: str, status: Union[str, "_models.RunStepStatus"], @@ -4097,9 +4391,9 @@ class RunStepToolCall(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" @overload @@ -4137,10 +4431,10 @@ class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_sear :vartype azure_ai_search: dict[str, str] """ - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'azure_ai_search'. Required. Default value is \"azure_ai_search\".""" - azure_ai_search: Dict[str, str] = rest_field() + azure_ai_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Reserved for future use. 
Required.""" @overload @@ -4178,10 +4472,10 @@ class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_groundin :vartype bing_grounding: dict[str, str] """ - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'bing_grounding'. Required. Default value is \"bing_grounding\".""" - bing_grounding: Dict[str, str] = rest_field() + bing_grounding: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Reserved for future use. Required.""" @overload @@ -4215,7 +4509,7 @@ class RunStepCodeInterpreterToolCallOutput(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Default value is None.""" @overload @@ -4247,9 +4541,11 @@ class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, di :vartype image: ~azure.ai.projects.models.RunStepCodeInterpreterImageReference """ - type: Literal["image"] = rest_discriminator(name="type") # type: ignore + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'image'. Required. Default value is \"image\".""" - image: "_models.RunStepCodeInterpreterImageReference" = rest_field() + image: "_models.RunStepCodeInterpreterImageReference" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Referential information for the image associated with this output. 
Required.""" @overload @@ -4278,7 +4574,7 @@ class RunStepCodeInterpreterImageReference(_model_base.Model): :vartype file_id: str """ - file_id: str = rest_field() + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the file associated with this image. Required.""" @overload @@ -4310,9 +4606,9 @@ class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, disc :vartype logs: str """ - type: Literal["logs"] = rest_discriminator(name="type") # type: ignore + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'logs'. Required. Default value is \"logs\".""" - logs: str = rest_field() + logs: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The serialized log output emitted by the code interpreter. Required.""" @overload @@ -4349,10 +4645,12 @@ class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interp :vartype code_interpreter: ~azure.ai.projects.models.RunStepCodeInterpreterToolCallDetails """ - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'code_interpreter'. Required. Default value is \"code_interpreter\".""" - code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The details of the tool call to the code interpreter tool. 
Required.""" @overload @@ -4385,9 +4683,11 @@ class RunStepCodeInterpreterToolCallDetails(_model_base.Model): :vartype outputs: list[~azure.ai.projects.models.RunStepCodeInterpreterToolCallOutput] """ - input: str = rest_field() + input: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The input provided by the model to the code interpreter tool. Required.""" - outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The outputs produced by the code interpreter tool back to the model in response to the tool call. Required.""" @@ -4423,11 +4723,11 @@ class RunStepCompletionUsage(_model_base.Model): :vartype total_tokens: int """ - completion_tokens: int = rest_field() + completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of completion tokens used over the course of the run step. Required.""" - prompt_tokens: int = rest_field() + prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Number of prompt tokens used over the course of the run step. Required.""" - total_tokens: int = rest_field() + total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Total number of tokens used (prompt + completion). 
Required.""" @overload @@ -4457,7 +4757,9 @@ class RunStepDelta(_model_base.Model): :vartype step_details: ~azure.ai.projects.models.RunStepDeltaDetail """ - step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The details of the run step.""" @overload @@ -4493,12 +4795,12 @@ class RunStepDeltaChunk(_model_base.Model): :vartype delta: ~azure.ai.projects.models.RunStepDelta """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier of the run step, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run.step.delta"] = rest_field() + object: Literal["thread.run.step.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always ``thread.run.step.delta``. Required. Default value is \"thread.run.step.delta\".""" - delta: "_models.RunStepDelta" = rest_field() + delta: "_models.RunStepDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The delta containing the fields that have changed on the run step. Required.""" @overload @@ -4534,11 +4836,13 @@ class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: :vartype outputs: list[~azure.ai.projects.models.RunStepDeltaCodeInterpreterOutput] """ - input: Optional[str] = rest_field() + input: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The input into the Code Interpreter tool call.""" - outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() + outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The outputs from the Code Interpreter tool call. 
Code Interpreter can output one or more - items, including text (``logs``) or images (``image``). Each of these are represented + items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented by a different object type.""" @@ -4578,10 +4882,10 @@ class RunStepDeltaCodeInterpreterOutput(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The index of the output in the streaming run step tool call's Code Interpreter outputs array. Required.""" - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of the streaming run step tool call's Code Interpreter output. Required. Default value is None.""" @@ -4618,9 +4922,11 @@ class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, :vartype image: ~azure.ai.projects.models.RunStepDeltaCodeInterpreterImageOutputObject """ - type: Literal["image"] = rest_discriminator(name="type") # type: ignore + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always \"image.\". Required. 
Default value is \"image\".""" - image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field() + image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The image data for the Code Interpreter tool call output.""" @overload @@ -4649,7 +4955,7 @@ class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model): # pylint :vartype file_id: str """ - file_id: Optional[str] = rest_field() + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The file ID for the image.""" @overload @@ -4684,9 +4990,9 @@ class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, di :vartype logs: str """ - type: Literal["logs"] = rest_discriminator(name="type") # type: ignore + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The type of the object, which is always \"logs.\". Required. Default value is \"logs\".""" - logs: Optional[str] = rest_field() + logs: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text output from the Code Interpreter tool call.""" @overload @@ -4727,11 +5033,11 @@ class RunStepDeltaToolCall(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The index of the tool call detail in the run step's tool_calls array. Required.""" - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the tool call, used when submitting outputs to the run. 
Required.""" - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of the tool call detail item in a streaming run step's details. Required. Default value is None.""" @@ -4771,10 +5077,12 @@ class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="c ~azure.ai.projects.models.RunStepDeltaCodeInterpreterDetailItemObject """ - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always \"code_interpreter.\". Required. Default value is \"code_interpreter\".""" - code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field() + code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The Code Interpreter data for the tool call.""" @overload @@ -4809,7 +5117,7 @@ class RunStepDeltaDetail(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type for the run step detail object. Required. Default value is None.""" @overload @@ -4842,12 +5150,14 @@ class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_s "file_search". :vartype type: str :ivar file_search: Reserved for future use. 
- :vartype file_search: dict[str, str] + :vartype file_search: ~azure.ai.projects.models.RunStepFileSearchToolCallResults """ - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always \"file_search.\". Required. Default value is \"file_search\".""" - file_search: Optional[Dict[str, str]] = rest_field() + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Reserved for future use.""" @overload @@ -4856,7 +5166,7 @@ def __init__( *, index: int, id: str, # pylint: disable=redefined-builtin - file_search: Optional[Dict[str, str]] = None, + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = None, ) -> None: ... @overload @@ -4881,11 +5191,11 @@ class RunStepDeltaFunction(_model_base.Model): :vartype output: str """ - name: Optional[str] = rest_field() + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function.""" - arguments: Optional[str] = rest_field() + arguments: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The arguments passed to the function as input.""" - output: Optional[str] = rest_field() + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The output of the function, null if outputs have not yet been submitted.""" @overload @@ -4923,9 +5233,11 @@ class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function :vartype function: ~azure.ai.projects.models.RunStepDeltaFunction """ - type: Literal["function"] = rest_discriminator(name="type") # type: ignore + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: 
ignore """The object type, which is always \"function.\". Required. Default value is \"function\".""" - function: Optional["_models.RunStepDeltaFunction"] = rest_field() + function: Optional["_models.RunStepDeltaFunction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The function data for the tool call.""" @overload @@ -4959,10 +5271,12 @@ class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_cre :vartype message_creation: ~azure.ai.projects.models.RunStepDeltaMessageCreationObject """ - type: Literal["message_creation"] = rest_discriminator(name="type") # type: ignore + type: Literal["message_creation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always \"message_creation.\". Required. Default value is \"message_creation\".""" - message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field() + message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The message creation data.""" @overload @@ -4990,7 +5304,7 @@ class RunStepDeltaMessageCreationObject(_model_base.Model): :vartype message_id: str """ - message_id: Optional[str] = rest_field() + message_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the newly-created message.""" @overload @@ -5022,9 +5336,11 @@ class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls") :vartype tool_calls: list[~azure.ai.projects.models.RunStepDeltaToolCall] """ - type: Literal["tool_calls"] = rest_discriminator(name="type") # type: ignore + type: Literal["tool_calls"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always \"tool_calls.\". Required. 
Default value is \"tool_calls\".""" - tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field() + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The collection of tool calls for the tool call detail item.""" @overload @@ -5057,7 +5373,7 @@ class RunStepDetails(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\".""" @overload @@ -5089,10 +5405,12 @@ class RunStepError(_model_base.Model): :vartype message: str """ - code: Union[str, "_models.RunStepErrorCode"] = rest_field() + code: Union[str, "_models.RunStepErrorCode"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The error code for this error. Required. Known values are: \"server_error\" and \"rate_limit_exceeded\".""" - message: str = rest_field() + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The human-readable text associated with this error. Required.""" @overload @@ -5130,9 +5448,11 @@ class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"): :vartype file_search: ~azure.ai.projects.models.RunStepFileSearchToolCallResults """ - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'file_search'. Required. 
Default value is \"file_search\".""" - file_search: "_models.RunStepFileSearchToolCallResults" = rest_field() + file_search: "_models.RunStepFileSearchToolCallResults" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """For now, this is always going to be an empty object. Required.""" @overload @@ -5170,13 +5490,15 @@ class RunStepFileSearchToolCallResult(_model_base.Model): :vartype content: list[~azure.ai.projects.models.FileSearchToolCallContent] """ - file_id: str = rest_field() + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the file that result was found in. Required.""" - file_name: str = rest_field() + file_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the file that result was found in. Required.""" - score: float = rest_field() + score: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The score of the result. All values must be a floating point number between 0 and 1. Required.""" - content: Optional[List["_models.FileSearchToolCallContent"]] = rest_field() + content: Optional[List["_models.FileSearchToolCallContent"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The content of the result that was found. 
The content is only included if requested via the include query parameter.""" @@ -5211,9 +5533,13 @@ class RunStepFileSearchToolCallResults(_model_base.Model): :vartype results: list[~azure.ai.projects.models.RunStepFileSearchToolCallResult] """ - ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field() + ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Ranking options for file search.""" - results: List["_models.RunStepFileSearchToolCallResult"] = rest_field() + results: List["_models.RunStepFileSearchToolCallResult"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The array of a file search results. Required.""" @overload @@ -5250,9 +5576,11 @@ class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"): :vartype function: ~azure.ai.projects.models.RunStepFunctionToolCallDetails """ - type: Literal["function"] = rest_discriminator(name="type") # type: ignore + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'function'. Required. Default value is \"function\".""" - function: "_models.RunStepFunctionToolCallDetails" = rest_field() + function: "_models.RunStepFunctionToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The detailed information about the function called by the model. Required.""" @overload @@ -5288,11 +5616,11 @@ class RunStepFunctionToolCallDetails(_model_base.Model): :vartype output: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the function. 
Required.""" - arguments: str = rest_field() + arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The arguments that the model requires are provided to the named function. Required.""" - output: str = rest_field() + output: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The output of the function, only populated for function calls that have already have had their outputs submitted. Required.""" @@ -5328,10 +5656,12 @@ class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creat :vartype message_creation: ~azure.ai.projects.models.RunStepMessageCreationReference """ - type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore + type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'message_creation'. Required. Represents a run step to create a message.""" - message_creation: "_models.RunStepMessageCreationReference" = rest_field() + message_creation: "_models.RunStepMessageCreationReference" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Information about the message creation associated with this run step. Required.""" @overload @@ -5360,7 +5690,7 @@ class RunStepMessageCreationReference(_model_base.Model): :vartype message_id: str """ - message_id: str = rest_field() + message_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the message created by this run step. 
Required.""" @overload @@ -5397,10 +5727,12 @@ class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="fabric_aisk :vartype microsoft_fabric: dict[str, str] """ - type: Literal["fabric_aiskill"] = rest_discriminator(name="type") # type: ignore + type: Literal["fabric_aiskill"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'fabric_aiskill'. Required. Default value is \"fabric_aiskill\".""" - microsoft_fabric: Dict[str, str] = rest_field(name="fabric_aiskill") + microsoft_fabric: Dict[str, str] = rest_field( + name="fabric_aiskill", visibility=["read", "create", "update", "delete", "query"] + ) """Reserved for future use. Required.""" @overload @@ -5438,10 +5770,12 @@ class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_groun :vartype share_point: dict[str, str] """ - type: Literal["sharepoint_grounding"] = rest_discriminator(name="type") # type: ignore + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'sharepoint_grounding'. Required. Default value is \"sharepoint_grounding\".""" - share_point: Dict[str, str] = rest_field(name="sharepoint_grounding") + share_point: Dict[str, str] = rest_field( + name="sharepoint_grounding", visibility=["read", "create", "update", "delete", "query"] + ) """Reserved for future use. Required.""" @overload @@ -5474,10 +5808,10 @@ class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): :vartype tool_calls: list[~azure.ai.projects.models.RunStepToolCall] """ - type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore + type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'tool_calls'. Required. 
Represents a run step that calls tools.""" - tool_calls: List["_models.RunStepToolCall"] = rest_field() + tool_calls: List["_models.RunStepToolCall"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of tool call details for this run step. Required.""" @overload @@ -5509,10 +5843,12 @@ class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_groundi :vartype sharepoint_grounding: ~azure.ai.projects.models.ToolConnectionList """ - type: Literal["sharepoint_grounding"] = rest_discriminator(name="type") # type: ignore + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'sharepoint_grounding'. Required. Default value is \"sharepoint_grounding\".""" - sharepoint_grounding: "_models.ToolConnectionList" = rest_field() + sharepoint_grounding: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The list of connections used by the SharePoint tool. Required.""" @overload @@ -5545,10 +5881,12 @@ class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs :vartype submit_tool_outputs: ~azure.ai.projects.models.SubmitToolOutputsDetails """ - type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore + type: Literal["submit_tool_outputs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'submit_tool_outputs'. Required. Default value is \"submit_tool_outputs\".""" - submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() + submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The details describing tools that should be called to submit tool outputs. 
Required.""" @overload @@ -5578,7 +5916,9 @@ class SubmitToolOutputsDetails(_model_base.Model): :vartype tool_calls: list[~azure.ai.projects.models.RequiredToolCall] """ - tool_calls: List["_models.RequiredToolCall"] = rest_field() + tool_calls: List["_models.RequiredToolCall"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" @overload @@ -5641,11 +5981,11 @@ class ThreadDeletionStatus(_model_base.Model): :vartype object: str """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether deletion was successful. Required.""" - object: Literal["thread.deleted"] = rest_field() + object: Literal["thread.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'thread.deleted'. Required. Default value is \"thread.deleted\".""" @@ -5702,8 +6042,8 @@ class ThreadMessage(_model_base.Model): :vartype role: str or ~azure.ai.projects.models.MessageRole :ivar content: The list of content items associated with the agent thread message. Required. :vartype content: list[~azure.ai.projects.models.MessageContent] - :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. - :vartype assistant_id: str + :ivar agent_id: If applicable, the ID of the agent that authored this message. Required. + :vartype agent_id: str :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. Required. 
:vartype run_id: str @@ -5716,36 +6056,46 @@ class ThreadMessage(_model_base.Model): :vartype metadata: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message"] = rest_field() + object: Literal["thread.message"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'thread.message'. Required. Default value is \"thread.message\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - thread_id: str = rest_field() + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the thread that this message belongs to. Required.""" - status: Union[str, "_models.MessageStatus"] = rest_field() + status: Union[str, "_models.MessageStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and \"completed\".""" - incomplete_details: "_models.MessageIncompleteDetails" = rest_field() + incomplete_details: "_models.MessageIncompleteDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """On an incomplete message, details about why the message is incomplete. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the message was completed. 
Required.""" - incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") + incomplete_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" - role: Union[str, "_models.MessageRole"] = rest_field() + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The role associated with the agent thread message. Required. Known values are: \"user\" and \"assistant\".""" - content: List["_models.MessageContent"] = rest_field() + content: List["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The list of content items associated with the agent thread message. Required.""" - assistant_id: str = rest_field() + agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) """If applicable, the ID of the agent that authored this message. Required.""" - run_id: str = rest_field() + run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """If applicable, the ID of the run associated with the authoring of this message. Required.""" - attachments: List["_models.MessageAttachment"] = rest_field() + attachments: List["_models.MessageAttachment"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of files attached to the message, and the tools they were added to. Required.""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. 
Required.""" @@ -5763,7 +6113,7 @@ def __init__( incomplete_at: datetime.datetime, role: Union[str, "_models.MessageRole"], content: List["_models.MessageContent"], - assistant_id: str, + agent_id: str, run_id: str, attachments: List["_models.MessageAttachment"], metadata: Dict[str, str], @@ -5809,22 +6159,24 @@ class ThreadMessageOptions(_model_base.Model): :vartype metadata: dict[str, str] """ - role: Union[str, "_models.MessageRole"] = rest_field() + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The role of the entity that is creating the message. Allowed values include: - * ``user``: Indicates the message is sent by an actual user and should be used in most + * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * ``assistant``: Indicates the message is generated by the agent. Use this value to insert + * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert messages from the agent into the conversation. Required. Known values are: \"user\" and \"assistant\".""" - content: str = rest_field() + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via a separate call to the create message API. 
Required.""" - attachments: Optional[List["_models.MessageAttachment"]] = rest_field() + attachments: Optional[List["_models.MessageAttachment"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[Dict[str, str]] = rest_field() + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length.""" @@ -5863,9 +6215,9 @@ class ThreadRun(_model_base.Model): :vartype object: str :ivar thread_id: The ID of the thread associated with this run. Required. :vartype thread_id: str - :ivar assistant_id: The ID of the agent associated with the thread this run was performed - against. Required. - :vartype assistant_id: str + :ivar agent_id: The ID of the agent associated with the thread this run was performed against. + Required. + :vartype agent_id: str :ivar status: The status of the agent thread run. Required. Known values are: "queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and "expired". @@ -5939,72 +6291,96 @@ class ThreadRun(_model_base.Model): :vartype parallel_tool_calls: bool """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run"] = rest_field() + object: Literal["thread.run"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'thread.run'. Required. 
Default value is \"thread.run\".""" - thread_id: str = rest_field() + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the thread associated with this run. Required.""" - assistant_id: str = rest_field() + agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) """The ID of the agent associated with the thread this run was performed against. Required.""" - status: Union[str, "_models.RunStatus"] = rest_field() + status: Union[str, "_models.RunStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\", \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\".""" - required_action: Optional["_models.RequiredAction"] = rest_field() + required_action: Optional["_models.RequiredAction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The details of the action required for the agent thread run to continue.""" - last_error: "_models.RunError" = rest_field() + last_error: "_models.RunError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last error, if any, encountered by this agent thread run. Required.""" - model: str = rest_field() + model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the model to use. Required.""" - instructions: str = rest_field() + instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The overridden system instructions used for this agent thread run. Required.""" - tools: List["_models.ToolDefinition"] = rest_field() + tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The overridden enabled tools used for this agent thread run. 
Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" - expires_at: datetime.datetime = rest_field(format="unix-timestamp") + expires_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this item expires. Required.""" - started_at: datetime.datetime = rest_field(format="unix-timestamp") + started_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this item was started. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this completed. Required.""" - cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + cancelled_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" - failed_at: datetime.datetime = rest_field(format="unix-timestamp") + failed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp, in seconds, representing when this failed. 
Required.""" - incomplete_details: "_models.IncompleteRunDetails" = rest_field() + incomplete_details: "_models.IncompleteRunDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.""" - usage: "_models.RunCompletionUsage" = rest_field() + usage: "_models.RunCompletionUsage" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``, ``queued``, etc.). Required.""" - temperature: Optional[float] = rest_field() + terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" + temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The sampling temperature used for this run. If not set, defaults to 1.""" - top_p: Optional[float] = rest_field() + top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The nucleus sampling value used for this run. If not set, defaults to 1.""" - max_prompt_tokens: int = rest_field() + max_prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The maximum number of prompt tokens specified to have been used over the course of the run. Required.""" - max_completion_tokens: int = rest_field() + max_completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The maximum number of completion tokens specified to have been used over the course of the run. Required.""" - truncation_strategy: "_models.TruncationObject" = rest_field() + truncation_strategy: "_models.TruncationObject" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The strategy to use for dropping messages as the context windows moves forward. 
Required.""" - tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field() + tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Controls whether or not and which tool is called by the model. Required. Is one of the following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], AgentsNamedToolChoice""" - response_format: "_types.AgentsApiResponseFormatOption" = rest_field() + response_format: "_types.AgentsApiResponseFormatOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The response format of the tool calls used in this run. Required. Is one of the following types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat, ResponseFormatJsonSchemaType""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required.""" - tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field() + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Override the tools the agent can use for this run. This is useful for modifying the behavior on a per-run basis.""" - parallel_tool_calls: bool = rest_field() + parallel_tool_calls: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Determines if tools can be executed in parallel within the run. 
Required.""" @overload @@ -6013,7 +6389,7 @@ def __init__( # pylint: disable=too-many-locals *, id: str, # pylint: disable=redefined-builtin thread_id: str, - assistant_id: str, + agent_id: str, status: Union[str, "_models.RunStatus"], last_error: "_models.RunError", model: str, @@ -6060,7 +6436,7 @@ class ToolConnection(_model_base.Model): :vartype connection_id: str """ - connection_id: str = rest_field() + connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A connection in a ToolConnectionList attached to this tool. Required.""" @overload @@ -6091,7 +6467,9 @@ class ToolConnectionList(_model_base.Model): :vartype connection_list: list[~azure.ai.projects.models.ToolConnection] """ - connection_list: Optional[List["_models.ToolConnection"]] = rest_field(name="connections") + connection_list: Optional[List["_models.ToolConnection"]] = rest_field( + name="connections", visibility=["read", "create", "update", "delete", "query"] + ) """The connections attached to this tool. 
There can be a maximum of 1 connection resource attached to the tool.""" @@ -6124,10 +6502,10 @@ class ToolOutput(_model_base.Model): :vartype output: str """ - tool_call_id: Optional[str] = rest_field() + tool_call_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the tool call being resolved, as provided in the tool calls of a required action from a run.""" - output: Optional[str] = rest_field() + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The output from the tool to be submitted.""" @overload @@ -6167,11 +6545,17 @@ class ToolResources(_model_base.Model): :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource """ - code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field() + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Resources to be used by the ``code_interpreter`` tool consisting of file IDs.""" - file_search: Optional["_models.FileSearchToolResource"] = rest_field() + file_search: Optional["_models.FileSearchToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" - azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" @overload @@ -6211,14 +6595,16 @@ class TruncationObject(_model_base.Model): :vartype last_messages: int """ - type: Union[str, "_models.TruncationStrategy"] = rest_field() + type: Union[str, "_models.TruncationStrategy"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The truncation 
strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``, the thread will + ``last_messages``\ , the thread will be truncated to the ``lastMessages`` count most recent messages in the thread. When set to - ``auto``, messages in the middle of the thread + ``auto``\ , messages in the middle of the thread will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known values are: \"auto\" and \"last_messages\".""" - last_messages: Optional[int] = rest_field() + last_messages: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of most recent messages from the thread when constructing the context for the run.""" @overload @@ -6247,7 +6633,7 @@ class UpdateCodeInterpreterToolResourceOptions(_model_base.Model): :vartype file_ids: list[str] """ - file_ids: Optional[List[str]] = rest_field() + file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of file IDs to override the current list of the agent.""" @overload @@ -6275,7 +6661,7 @@ class UpdateFileSearchToolResourceOptions(_model_base.Model): :vartype vector_store_ids: list[str] """ - vector_store_ids: Optional[List[str]] = rest_field() + vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of vector store IDs to override the current list of the agent.""" @overload @@ -6315,14 +6701,20 @@ class UpdateToolResourcesOptions(_model_base.Model): :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource """ - code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field() + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Overrides the list of file IDs made available to the ``code_interpreter`` tool. 
There can be a maximum of 20 files associated with the tool.""" - file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" - azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" @@ -6384,29 +6776,39 @@ class VectorStore(_model_base.Model): :vartype metadata: dict[str, str] """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store"] = rest_field() + object: Literal["vector_store"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the vector store was created. Required.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the vector store. Required.""" - usage_bytes: int = rest_field() + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The total number of bytes used by the files in the vector store. 
Required.""" - file_counts: "_models.VectorStoreFileCount" = rest_field() + file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Files count grouped by status processed or being processed by this vector store. Required.""" - status: Union[str, "_models.VectorStoreStatus"] = rest_field() - """The status of the vector store, which can be either ``expired``, ``in_progress``, or + status: Union[str, "_models.VectorStoreStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or ``completed``. A status of ``completed`` indicates that the vector store is ready for use. Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" - expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Details on when this vector store expires.""" - expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp") + expires_at: Optional[datetime.datetime] = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the vector store will expire.""" - last_active_at: datetime.datetime = rest_field(format="unix-timestamp") + last_active_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the vector store was last active. 
Required.""" - metadata: Dict[str, str] = rest_field() + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required.""" @@ -6452,7 +6854,7 @@ class VectorStoreChunkingStrategyRequest(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. Known values are: \"auto\" and \"static\".""" @overload @@ -6483,7 +6885,7 @@ class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, :vartype type: str or ~azure.ai.projects.models.AUTO """ - type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'auto'. Required.""" @overload @@ -6514,7 +6916,7 @@ class VectorStoreChunkingStrategyResponse(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The object type. Required. 
Known values are: \"other\" and \"static\".""" @overload @@ -6544,7 +6946,7 @@ class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyRespons :vartype type: str or ~azure.ai.projects.models.OTHER """ - type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore + type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'other'. Required.""" @overload @@ -6572,7 +6974,9 @@ class VectorStoreConfiguration(_model_base.Model): :vartype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] """ - data_sources: List["_models.VectorStoreDataSource"] = rest_field() + data_sources: List["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Data sources. Required.""" @overload @@ -6604,9 +7008,11 @@ class VectorStoreConfigurations(_model_base.Model): :vartype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration """ - store_name: str = rest_field(name="name") + store_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) """Name. Required.""" - store_configuration: "_models.VectorStoreConfiguration" = rest_field(name="configuration") + store_configuration: "_models.VectorStoreConfiguration" = rest_field( + name="configuration", visibility=["read", "create", "update", "delete", "query"] + ) """Configurations. Required.""" @overload @@ -6640,9 +7046,11 @@ class VectorStoreDataSource(_model_base.Model): :vartype asset_type: str or ~azure.ai.projects.models.VectorStoreDataSourceAssetType """ - asset_identifier: str = rest_field(name="uri") + asset_identifier: str = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) """Asset URI. 
Required.""" - asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field(name="type") + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field( + name="type", visibility=["read", "create", "update", "delete", "query"] + ) """The asset type. Required. Known values are: \"uri_asset\" and \"id_asset\".""" @overload @@ -6679,11 +7087,11 @@ class VectorStoreDeletionStatus(_model_base.Model): :vartype object: str """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether deletion was successful. Required.""" - object: Literal["vector_store.deleted"] = rest_field() + object: Literal["vector_store.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always 'vector_store.deleted'. Required. Default value is \"vector_store.deleted\".""" @@ -6718,10 +7126,12 @@ class VectorStoreExpirationPolicy(_model_base.Model): :vartype days: int """ - anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field() + anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Anchor timestamp after which the expiration policy applies. Supported anchors: ``last_active_at``. Required. \"last_active_at\"""" - days: int = rest_field() + days: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The anchor timestamp after which the expiration policy applies. 
Required.""" @overload @@ -6775,27 +7185,33 @@ class VectorStoreFile(_model_base.Model): :vartype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyResponse """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.file"] = rest_field() + object: Literal["vector_store.file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always ``vector_store.file``. Required. Default value is \"vector_store.file\".""" - usage_bytes: int = rest_field() + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The total vector store usage in bytes. Note that this may be different from the original file size. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" - vector_store_id: str = rest_field() + vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the vector store that the file is attached to. Required.""" - status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``, ``completed``, - ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector store file + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , + ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file is ready for use. 
Required. Known values are: \"in_progress\", \"completed\", \"failed\", and \"cancelled\".""" - last_error: "_models.VectorStoreFileError" = rest_field() + last_error: "_models.VectorStoreFileError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The last error associated with this vector store file. Will be ``null`` if there are no errors. Required.""" - chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The strategy used to chunk the file. Required.""" @overload @@ -6848,20 +7264,24 @@ class VectorStoreFileBatch(_model_base.Model): :vartype file_counts: ~azure.ai.projects.models.VectorStoreFileCount """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.files_batch"] = rest_field() + object: Literal["vector_store.files_batch"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always ``vector_store.file_batch``. Required. Default value is \"vector_store.files_batch\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" - vector_store_id: str = rest_field() + vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the vector store that the file is attached to. 
Required.""" - status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``, - ``completed``, ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store files batch, which can be either ``in_progress``\ , + ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", \"completed\", \"cancelled\", and \"failed\".""" - file_counts: "_models.VectorStoreFileCount" = rest_field() + file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Files count grouped by status processed or being processed by this vector store. Required.""" @overload @@ -6903,15 +7323,15 @@ class VectorStoreFileCount(_model_base.Model): :vartype total: int """ - in_progress: int = rest_field() + in_progress: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of files that are currently being processed. Required.""" - completed: int = rest_field() + completed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of files that have been successfully processed. Required.""" - failed: int = rest_field() + failed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of files that have failed to process. Required.""" - cancelled: int = rest_field() + cancelled: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of files that were cancelled. Required.""" - total: int = rest_field() + total: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The total number of files. 
Required.""" @overload @@ -6951,11 +7371,13 @@ class VectorStoreFileDeletionStatus(_model_base.Model): :vartype object: str """ - id: str = rest_field() + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether deletion was successful. Required.""" - object: Literal["vector_store.file.deleted"] = rest_field() + object: Literal["vector_store.file.deleted"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The object type, which is always 'vector_store.deleted'. Required. Default value is \"vector_store.file.deleted\".""" @@ -6990,10 +7412,12 @@ class VectorStoreFileError(_model_base.Model): :vartype message: str """ - code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field() + code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """One of ``server_error`` or ``rate_limit_exceeded``. Required. Known values are: \"server_error\", \"invalid_file\", and \"unsupported_file\".""" - message: str = rest_field() + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A human-readable description of the error. Required.""" @overload @@ -7028,10 +7452,10 @@ class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): :vartype chunk_overlap_tokens: int """ - max_chunk_size_tokens: int = rest_field() + max_chunk_size_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 and the maximum value is 4096. 
Required.""" - chunk_overlap_tokens: int = rest_field() + chunk_overlap_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of tokens that overlap between chunks. The default value is 400. Note that the overlap must not exceed half of max_chunk_size_tokens. Required.""" @@ -7065,9 +7489,11 @@ class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyReques :vartype static: ~azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions """ - type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore + type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The options for the static chunking strategy. Required.""" @overload @@ -7100,9 +7526,11 @@ class VectorStoreStaticChunkingStrategyResponse( :vartype static: ~azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions """ - type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore + type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The options for the static chunking strategy. 
Required.""" @overload @@ -7131,7 +7559,9 @@ class WorkspaceProperties(_model_base.Model): :vartype application_insights: str """ - application_insights: str = rest_field(name="applicationInsights") + application_insights: str = rest_field( + name="applicationInsights", visibility=["read", "create", "update", "delete", "query"] + ) """Authentication type of the connection target. Required.""" @overload diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index d6875e448d65..f7dd32510333 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -7,1807 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import base64 -import datetime -import inspect -import itertools -import json -import logging -import math -import re -from abc import ABC, abstractmethod -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Dict, - Generic, - Iterator, - List, - Mapping, - Optional, - Set, - Tuple, - Type, - TypeVar, - Union, - cast, - get_args, - get_origin, - overload, -) +from typing import List -from azure.core.credentials import AccessToken, TokenCredential -from azure.core.credentials_async import AsyncTokenCredential - -from ._enums import AgentStreamEvent, ConnectionType, MessageRole -from ._models import ( - AzureAISearchResource, - AzureAISearchToolDefinition, - AzureFunctionDefinition, - AzureFunctionStorageQueue, - AzureFunctionToolDefinition, - AzureFunctionBinding, - BingGroundingToolDefinition, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - FileSearchToolDefinition, - FileSearchToolResource, - FunctionDefinition, - FunctionToolDefinition, - 
GetConnectionResponse, - IndexResource, - MessageImageFileContent, - MessageTextContent, - MessageTextFileCitationAnnotation, - MessageTextFilePathAnnotation, - MicrosoftFabricToolDefinition, - OpenApiAuthDetails, - OpenApiToolDefinition, - OpenApiFunctionDefinition, - RequiredFunctionToolCall, - RunStep, - RunStepDeltaChunk, - SharepointToolDefinition, - SubmitToolOutputsAction, - ThreadRun, - ToolConnection, - ToolConnectionList, - ToolDefinition, - ToolResources, - MessageDeltaTextContent, - VectorStoreDataSource, -) - -from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated -from ._models import ThreadMessage as ThreadMessageGenerated -from ._models import OpenAIPageableListOfThreadMessage as OpenAIPageableListOfThreadMessageGenerated -from ._models import MessageAttachment as MessageAttachmentGenerated - -from .. import _types - - -logger = logging.getLogger(__name__) - -StreamEventData = Union["MessageDeltaChunk", "ThreadMessage", ThreadRun, RunStep, str] - - -def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: - """ - Remove the parameters, non present in class public fields; return shallow copy of a dictionary. - - **Note:** Classes inherited from the model check that the parameters are present - in the list of attributes and if they are not, the error is being raised. This check may not - be relevant for classes, not inherited from azure.ai.projects._model_base.Model. - :param Type model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :return: The dictionary with all invalid parameters removed. 
- :rtype: Dict[str, Any] - """ - new_params = {} - valid_parameters = set( - filter( - lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() - ) - ) - for k in filter(lambda x: x in valid_parameters, parameters.keys()): - new_params[k] = parameters[k] - return new_params - - -def _safe_instantiate( - model_class: Type, parameters: Union[str, Dict[str, Any]], *, generated_class: Optional[Type] = None -) -> StreamEventData: - """ - Instantiate class with the set of parameters from the server. - - :param Type model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :type parameters: Union[str, Dict[str, Any]] - :keyword Optional[Type] generated_class: The optional generated type. - :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. - :rtype: Any - """ - if not generated_class: - generated_class = model_class - if not isinstance(parameters, dict): - return parameters - return cast(StreamEventData, model_class(**_filter_parameters(generated_class, parameters))) - - -def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: - event_lines = event_data_str.strip().split("\n") - event_type: Optional[str] = None - event_data = "" - event_obj: StreamEventData - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data: Union[str, Dict[str, StreamEventData]] = cast(Dict[str, StreamEventData], json.loads(event_data)) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - 
parsed_data["expired_at"] = parsed_data.pop("expires_at") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED.value, - AgentStreamEvent.THREAD_RUN_QUEUED.value, - AgentStreamEvent.THREAD_RUN_INCOMPLETE.value, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, - AgentStreamEvent.THREAD_RUN_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_FAILED.value, - AgentStreamEvent.THREAD_RUN_CANCELLING.value, - AgentStreamEvent.THREAD_RUN_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_EXPIRED.value, - }: - event_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED.value, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED.value, - AgentStreamEvent.THREAD_RUN_STEP_FAILED.value, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED.value, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED.value, - }: - event_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED.value, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED.value, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, - }: - event_obj = _safe_instantiate(ThreadMessage, parsed_data, generated_class=ThreadMessageGenerated) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: - event_obj = _safe_instantiate(MessageDeltaChunk, parsed_data, generated_class=MessageDeltaChunkGenerated) - - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA.value: - event_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_obj = str(parsed_data) - - return event_type, event_obj - - -class ConnectionProperties: - """The properties of a single connection. - - :ivar id: A unique identifier for the connection. - :vartype id: str - :ivar name: The friendly name of the connection. 
- :vartype name: str - :ivar authentication_type: The authentication type used by the connection. - :vartype authentication_type: ~azure.ai.projects.models._models.AuthenticationType - :ivar connection_type: The connection type . - :vartype connection_type: ~azure.ai.projects.models._models.ConnectionType - :ivar endpoint_url: The endpoint URL associated with this connection - :vartype endpoint_url: str - :ivar key: The api-key to be used when accessing the connection. - :vartype key: str - :ivar token_credential: The TokenCredential to be used when accessing the connection. - :vartype token_credential: ~azure.core.credentials.TokenCredential - """ - - def __init__( - self, - *, - connection: GetConnectionResponse, - token_credential: Union[TokenCredential, AsyncTokenCredential, None] = None, - ) -> None: - self.id = connection.id - self.name = connection.name - self.authentication_type = connection.properties.auth_type - self.connection_type = cast(ConnectionType, connection.properties.category) - self.endpoint_url = ( - connection.properties.target[:-1] - if connection.properties.target.endswith("/") - else connection.properties.target - ) - self.key: Optional[str] = None - if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): # type: ignore - self.key = connection.properties.credentials.key # type: ignore - self.token_credential = token_credential - - def to_evaluator_model_config( - self, deployment_name: str, api_version: str, *, include_credentials: bool = False - ) -> Dict[str, str]: - """Get model configuration to be used with evaluators, from connection. - - :param deployment_name: Deployment name to build model configuration. - :type deployment_name: str - :param api_version: API version used by model deployment. - :type api_version: str - :keyword include_credentials: Include credentials in the model configuration. 
If set to True, the model - configuration will have the key field set to the actual key value. - If set to False, the model configuration will have the key field set to the connection id. - To get the secret, connection.get method should be called with include_credentials set to True. - :paramtype include_credentials: bool - - :returns: Model configuration dictionary. - :rtype: Dict[str, str] - """ - connection_type = self.connection_type.value - if self.connection_type.value == ConnectionType.AZURE_OPEN_AI: - connection_type = "azure_openai" - - if self.authentication_type == "ApiKey": - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": connection_type, - "api_version": api_version, - "api_key": self.key if include_credentials and self.key else f"{self.id}/credentials/key", - } - else: - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": self.connection_type, - "api_version": api_version, - } - return model_config - - def __str__(self): - out = "{\n" - out += f' "name": "{self.name}",\n' - out += f' "id": "{self.id}",\n' - out += f' "authentication_type": "{self.authentication_type}",\n' - out += f' "connection_type": "{self.connection_type}",\n' - out += f' "endpoint_url": "{self.endpoint_url}",\n' - if self.key: - out += ' "key": "REDACTED"\n' - else: - out += ' "key": null\n' - if self.token_credential: - out += ' "token_credential": "REDACTED"\n' - else: - out += ' "token_credential": null\n' - out += "}\n" - return out - - -# TODO: Look into adding an async version of this class -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - 
self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.projects import AIProjectClient - - project_client = AIProjectClient( - credential=self._credential, - # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. - # http://management.azure.com is hard coded in the SDK. - endpoint="not-needed", - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) - - self._sas_token = "" - if connection is not None and connection.token_credential is not None: - sas_credential = cast(SASTokenCredential, connection.token_credential) - self._sas_token = sas_credential._sas_token # pylint: disable=protected-access - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. 
New token expires on %s.", self._expires_on) - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> AccessToken: - """Request an access token for `scopes`. - - :param str scopes: The type of access needed. - - :keyword str claims: Additional claims required in the token, such as those returned in a resource - provider's claims challenge following an authorization failure. - :keyword str tenant_id: Optional tenant to include in the token request. - :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested - token. Defaults to False. - - :rtype: AccessToken - :return: An AccessToken instance containing the token string and its expiration time in Unix time. - """ - logger.debug("SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) - - -# Define type_map to translate Python type annotations to JSON Schema types -type_map = { - "str": "string", - "int": "integer", - "float": "number", - "bool": "boolean", - "NoneType": "null", - "list": "array", - "dict": "object", -} - - -def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements - if annotation == inspect.Parameter.empty: - return {"type": "string"} # Default type if annotation is missing - - origin = get_origin(annotation) - - if origin in {list, List}: - args = get_args(annotation) - item_type = args[0] if args else str - return {"type": "array", "items": _map_type(item_type)} - if origin in {dict, Dict}: - return {"type": "object"} - if origin is Union: - args = get_args(annotation) - # If Union contains None, it is an optional parameter - if type(None) in args: - # If Union contains only one non-None type, it is a nullable parameter - non_none_args = [arg for arg in args if 
arg is not type(None)] - if len(non_none_args) == 1: - schema = _map_type(non_none_args[0]) - if "type" in schema: - if isinstance(schema["type"], str): - schema["type"] = [schema["type"], "null"] - elif "null" not in schema["type"]: - schema["type"].append("null") - else: - schema["type"] = ["null"] - return schema - # If Union contains multiple types, it is a oneOf parameter - return {"oneOf": [_map_type(arg) for arg in args]} - if isinstance(annotation, type): - schema_type = type_map.get(annotation.__name__, "string") - return {"type": schema_type} - - return {"type": "string"} # Fallback to "string" if type is unrecognized - - -def is_optional(annotation) -> bool: - origin = get_origin(annotation) - if origin is Union: - args = get_args(annotation) - return type(None) in args - return False - - -class MessageDeltaChunk(MessageDeltaChunkGenerated): - @property - def text(self) -> str: - """Get the text content of the delta chunk. - - :rtype: str - """ - if not self.delta or not self.delta.content: - return "" - return "".join( - content_part.text.value or "" - for content_part in self.delta.content - if isinstance(content_part, MessageDeltaTextContent) and content_part.text - ) - - -class ThreadMessage(ThreadMessageGenerated): - @property - def text_messages(self) -> List[MessageTextContent]: - """Returns all text message contents in the messages. - - :rtype: List[MessageTextContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageTextContent)] - - @property - def image_contents(self) -> List[MessageImageFileContent]: - """Returns all image file contents from image message contents in the messages. 
- - :rtype: List[MessageImageFileContent] - """ - if not self.content: - return [] - return [content for content in self.content if isinstance(content, MessageImageFileContent)] - - @property - def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: - """Returns all file citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextFileCitationAnnotation] - """ - if not self.content: - return [] - - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFileCitationAnnotation) - ] - - @property - def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: - """Returns all file path annotations from text message annotations in the messages. - - :rtype: List[MessageTextFilePathAnnotation] - """ - if not self.content: - return [] - return [ - annotation - for content in self.content - if isinstance(content, MessageTextContent) - for annotation in content.text.annotations - if isinstance(annotation, MessageTextFilePathAnnotation) - ] - - -class MessageAttachment(MessageAttachmentGenerated): - @overload - def __init__( - self, - *, - tools: List["FileSearchToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - @overload - def __init__( - self, - *, - tools: List["CodeInterpreterToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - @overload - def __init__( - self, - *, - tools: List["_types.MessageAttachmentToolDefinition"], - file_id: Optional[str] = None, - data_source: Optional["VectorStoreDataSource"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -ToolDefinitionT = TypeVar("ToolDefinitionT", bound=ToolDefinition) -ToolT = TypeVar("ToolT", bound="Tool") - - -class Tool(ABC, Generic[ToolDefinitionT]): - """ - An abstract class representing a tool that can be used by an agent. - """ - - @property - @abstractmethod - def definitions(self) -> List[ToolDefinitionT]: - """Get the tool definitions.""" - - @property - @abstractmethod - def resources(self) -> ToolResources: - """Get the tool resources.""" - - @abstractmethod - def execute(self, tool_call: Any) -> Any: - """ - Execute the tool with the provided tool call. - - :param Any tool_call: The tool call to execute. - :return: The output of the tool operations. - """ - - -class BaseFunctionTool(Tool[FunctionToolDefinition]): - """ - A tool that executes user-defined functions. - """ - - def __init__(self, functions: Set[Callable[..., Any]]): - """ - Initialize FunctionTool with a set of functions. - - :param functions: A set of function objects. - """ - self._functions = self._create_function_dict(functions) - self._definitions = self._build_function_definitions(self._functions) - - def add_functions(self, extra_functions: Set[Callable[..., Any]]) -> None: - """ - Add more functions into this FunctionTool’s existing function set. - If a function with the same name already exists, it is overwritten. - - :param extra_functions: A set of additional functions to be added to - the existing function set. Functions are defined as callables and - may have any number of arguments and return types. 
- :type extra_functions: Set[Callable[..., Any]] - """ - # Convert the existing dictionary of { name: function } back into a set - existing_functions = set(self._functions.values()) - # Merge old + new - combined = existing_functions.union(extra_functions) - # Rebuild state - self._functions = self._create_function_dict(combined) - self._definitions = self._build_function_definitions(self._functions) - - def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: - return {func.__name__: func for func in functions} - - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: - specs: List[FunctionToolDefinition] = [] - # Flexible regex to capture ':param : ' - param_pattern = re.compile( - r""" - ^\s* # Optional leading whitespace - :param # Literal ':param' - \s+ # At least one whitespace character - (?P[^:\s\(\)]+) # Parameter name (no spaces, colons, or parentheses) - (?:\s*\(\s*(?P[^)]+?)\s*\))? # Optional type in parentheses, allowing internal spaces - \s*:\s* # Colon ':' surrounded by optional whitespace - (?P.+) # Description (rest of the line) - """, - re.VERBOSE, - ) - - for name, func in functions.items(): - sig = inspect.signature(func) - params = sig.parameters - docstring = inspect.getdoc(func) or "" - description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description" - - param_descriptions = {} - for line in docstring.splitlines(): - line = line.strip() - match = param_pattern.match(line) - if match: - groups = match.groupdict() - param_name = groups.get("name") - param_desc = groups.get("description") - param_desc = param_desc.strip() if param_desc else "No description" - param_descriptions[param_name] = param_desc.strip() - - properties = {} - required = [] - for param_name, param in params.items(): - param_type_info = _map_type(param.annotation) - param_description = param_descriptions.get(param_name, "No description") - - properties[param_name] = 
{**param_type_info, "description": param_description} - - # If the parameter has no default value and is not optional, add it to the required list - if param.default is inspect.Parameter.empty and not is_optional(param.annotation): - required.append(param_name) - - function_def = FunctionDefinition( - name=name, - description=description, - parameters={"type": "object", "properties": properties, "required": required}, - ) - tool_def = FunctionToolDefinition(function=function_def) - specs.append(tool_def) - - return specs - - def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: - function_name = tool_call.function.name - arguments = tool_call.function.arguments - - if function_name not in self._functions: - logging.error("Function '%s' not found.", function_name) - raise ValueError(f"Function '{function_name}' not found.") - - function = self._functions[function_name] - - try: - parsed_arguments = json.loads(arguments) - except json.JSONDecodeError as e: - logging.error("Invalid JSON arguments for function '%s': %s", function_name, e) - raise ValueError(f"Invalid JSON arguments: {e}") from e - - if not isinstance(parsed_arguments, dict): - logging.error("Arguments must be a JSON object for function '%s'.", function_name) - raise TypeError("Arguments must be a JSON object.") - - return function, parsed_arguments - - @property - def definitions(self) -> List[FunctionToolDefinition]: - """ - Get the function definitions. - - :return: A list of function definitions. - :rtype: List[ToolDefinition] - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as FunctionTool doesn't have specific resources. 
class FunctionTool(BaseFunctionTool):
    """Synchronous function tool that dispatches tool calls to registered Python callables."""

    def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
        """Run the callable referenced by the tool call with its JSON-decoded arguments.

        :param RequiredFunctionToolCall tool_call: The function tool call to execute.
        :return: The callable's return value, or a JSON error payload on TypeError.
        :rtype: Any
        """
        func, kwargs = self._get_func_and_args(tool_call)

        try:
            if kwargs:
                return func(**kwargs)
            return func()
        except TypeError as e:
            error_message = f"Error executing function '{tool_call.function.name}': {e}"
            logging.error(error_message)
            # Return error message as JSON string back to agent in order to make possible self
            # correction to the function call
            return json.dumps({"error": error_message})


class AsyncFunctionTool(BaseFunctionTool):
    """Asynchronous function tool; awaits coroutine functions and calls plain callables directly."""

    async def execute(self, tool_call: RequiredFunctionToolCall) -> Any:  # pylint: disable=invalid-overridden-method
        """Run the callable referenced by the tool call, awaiting it when it is a coroutine function.

        :param RequiredFunctionToolCall tool_call: The function tool call to execute.
        :return: The callable's return value, or a JSON error payload on TypeError.
        :rtype: Any
        """
        func, kwargs = self._get_func_and_args(tool_call)

        try:
            if inspect.iscoroutinefunction(func):
                return await func(**kwargs) if kwargs else await func()
            return func(**kwargs) if kwargs else func()
        except TypeError as e:
            error_message = f"Error executing function '{tool_call.function.name}': {e}"
            logging.error(error_message)
            # Return error message as JSON string back to agent in order to make possible self correction
            # to the function call
            return json.dumps({"error": error_message})


class AzureAISearchTool(Tool[AzureAISearchToolDefinition]):
    """
    A tool that searches for information using Azure AI Search.
    """

    def __init__(self, index_connection_id: str, index_name: str):
        index = IndexResource(index_connection_id=index_connection_id, index_name=index_name)
        self.index_list = [index]

    @property
    def definitions(self) -> List[AzureAISearchToolDefinition]:
        """
        Get the Azure AI search tool definitions.

        :return: A list of tool definitions.
        :rtype: List[ToolDefinition]
        """
        return [AzureAISearchToolDefinition()]

    @property
    def resources(self) -> ToolResources:
        """
        Get the Azure AI search resources.

        :return: ToolResources populated with azure_ai_search associated resources.
        :rtype: ToolResources
        """
        search_resource = AzureAISearchResource(index_list=self.index_list)
        return ToolResources(azure_ai_search=search_resource)

    def execute(self, tool_call: Any):
        """
        AI Search tool does not execute client-side.

        :param Any tool_call: The tool call to execute.
        """


class OpenApiTool(Tool[OpenApiToolDefinition]):
    """
    A tool that retrieves information using OpenAPI specs.
    Initialized with an initial API definition (name, description, spec, auth),
    this class also supports adding and removing additional API definitions dynamically.
    """

    def __init__(self, name: str, description: str, spec: Any, auth: OpenApiAuthDetails):
        """
        Constructor initializes the tool with a primary API definition.

        :param name: The name of the API.
        :param description: The API description.
        :param spec: The API specification.
        :param auth: Authentication details for the API.
        :type auth: OpenApiAuthDetails
        """
        self._default_auth = auth
        initial = OpenApiToolDefinition(
            openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth)
        )
        self._definitions: List[OpenApiToolDefinition] = [initial]

    @property
    def definitions(self) -> List[OpenApiToolDefinition]:
        """
        Get the list of all API definitions for the tool.

        :return: A list of OpenAPI tool definitions.
        :rtype: List[ToolDefinition]
        """
        return self._definitions

    def add_definition(self, name: str, description: str, spec: Any, auth: Optional[OpenApiAuthDetails] = None) -> None:
        """
        Adds a new API definition dynamically.
        Raises a ValueError if a definition with the same name already exists.

        :param name: The name of the API.
        :type name: str
        :param description: The description of the API.
        :type description: str
        :param spec: The API specification.
        :type spec: Any
        :param auth: Optional authentication details for this particular API definition.
                     If not provided, the tool's default authentication details will be used.
        :type auth: Optional[OpenApiAuthDetails]
        :raises ValueError: If a definition with the same name exists.
        """
        if any(definition.openapi.name == name for definition in self._definitions):
            raise ValueError(f"Definition '{name}' already exists and cannot be added again.")

        # Fall back to the tool-level default auth when none is supplied for this definition.
        auth_to_use = self._default_auth if auth is None else auth
        self._definitions.append(
            OpenApiToolDefinition(
                openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth_to_use)
            )
        )

    def remove_definition(self, name: str) -> None:
        """
        Removes an API definition based on its name.

        :param name: The name of the API definition to remove.
        :type name: str
        :raises ValueError: If the definition with the specified name does not exist.
        """
        target = next((d for d in self._definitions if d.openapi.name == name), None)
        if target is None:
            raise ValueError(f"Definition with the name '{name}' does not exist.")
        self._definitions.remove(target)
        logging.info("Definition '%s' removed. Total definitions: %d.", name, len(self._definitions))

    @property
    def resources(self) -> ToolResources:
        """
        Get the tool resources for the agent.

        :return: An empty ToolResources as OpenApiTool doesn't have specific resources.
        :rtype: ToolResources
        """
        return ToolResources()

    def execute(self, tool_call: Any) -> None:
        """
        OpenApiTool does not execute client-side.

        :param Any tool_call: The tool call to execute.
        :type tool_call: Any
        """
class AzureFunctionTool(Tool[AzureFunctionToolDefinition]):
    """
    A tool that is used to inform agent about available the Azure function.

    :param name: The azure function name.
    :param description: The azure function description.
    :param parameters: The description of function parameters.
    :param input_queue: Input queue used, by azure function.
    :param output_queue: Output queue used, by azure function.
    """

    def __init__(
        self,
        name: str,
        description: str,
        parameters: Dict[str, Any],
        input_queue: AzureFunctionStorageQueue,
        output_queue: AzureFunctionStorageQueue,
    ) -> None:
        # Assemble the nested definition from its parts for readability.
        function_spec = FunctionDefinition(name=name, description=description, parameters=parameters)
        azure_function = AzureFunctionDefinition(
            function=function_spec,
            input_binding=AzureFunctionBinding(storage_queue=input_queue),
            output_binding=AzureFunctionBinding(storage_queue=output_queue),
        )
        self._definitions = [AzureFunctionToolDefinition(azure_function=azure_function)]

    @property
    def definitions(self) -> List[AzureFunctionToolDefinition]:
        """
        Get the Azure AI search tool definitions.

        :rtype: List[ToolDefinition]
        """
        return self._definitions

    @property
    def resources(self) -> ToolResources:
        """
        Get the Azure AI search resources.

        :rtype: ToolResources
        """
        return ToolResources()

    def execute(self, tool_call: Any) -> Any:
        pass


class ConnectionTool(Tool[ToolDefinitionT]):
    """
    A tool that requires connection ids.
    Used as base class for Bing Grounding, Sharepoint, and Microsoft Fabric
    """

    def __init__(self, connection_id: str):
        """
        Initialize ConnectionTool with a connection_id.

        :param connection_id: Connection ID used by tool. All connection tools allow only one connection.
        """
        self.connection_ids = [ToolConnection(connection_id=connection_id)]

    @property
    def resources(self) -> ToolResources:
        """
        Get the connection tool resources.

        :rtype: ToolResources
        """
        return ToolResources()

    def execute(self, tool_call: Any) -> Any:
        pass


class BingGroundingTool(ConnectionTool[BingGroundingToolDefinition]):
    """
    A tool that searches for information using Bing.
    """

    @property
    def definitions(self) -> List[BingGroundingToolDefinition]:
        """
        Get the Bing grounding tool definitions.

        :rtype: List[ToolDefinition]
        """
        connections = ToolConnectionList(connection_list=self.connection_ids)
        return [BingGroundingToolDefinition(bing_grounding=connections)]


class FabricTool(ConnectionTool[MicrosoftFabricToolDefinition]):
    """
    A tool that searches for information using Microsoft Fabric.
    """

    @property
    def definitions(self) -> List[MicrosoftFabricToolDefinition]:
        """
        Get the Microsoft Fabric tool definitions.

        :rtype: List[ToolDefinition]
        """
        connections = ToolConnectionList(connection_list=self.connection_ids)
        return [MicrosoftFabricToolDefinition(fabric_aiskill=connections)]


class SharepointTool(ConnectionTool[SharepointToolDefinition]):
    """
    A tool that searches for information using Sharepoint.
    """

    @property
    def definitions(self) -> List[SharepointToolDefinition]:
        """
        Get the Sharepoint tool definitions.

        :rtype: List[ToolDefinition]
        """
        connections = ToolConnectionList(connection_list=self.connection_ids)
        return [SharepointToolDefinition(sharepoint_grounding=connections)]


class FileSearchTool(Tool[FileSearchToolDefinition]):
    """
    A tool that searches for uploaded file information from the created vector stores.

    :param vector_store_ids: A list of vector store IDs to search for files.
    :type vector_store_ids: list[str]
    """

    def __init__(self, vector_store_ids: Optional[List[str]] = None):
        # A set keeps IDs unique and makes add/remove cheap.
        self.vector_store_ids = set(vector_store_ids) if vector_store_ids is not None else set()

    def add_vector_store(self, store_id: str) -> None:
        """
        Add a vector store ID to the list of vector stores to search for files.

        :param store_id: The ID of the vector store to search for files.
        :type store_id: str

        """
        self.vector_store_ids.add(store_id)

    def remove_vector_store(self, store_id: str) -> None:
        """
        Remove a vector store ID from the list of vector stores to search for files.

        :param store_id: The ID of the vector store to remove.
        :type store_id: str

        """
        self.vector_store_ids.remove(store_id)

    @property
    def definitions(self) -> List[FileSearchToolDefinition]:
        """
        Get the file search tool definitions.

        :rtype: List[ToolDefinition]
        """
        return [FileSearchToolDefinition()]

    @property
    def resources(self) -> ToolResources:
        """
        Get the file search resources.

        :rtype: ToolResources
        """
        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids)))

    def execute(self, tool_call: Any) -> Any:
        pass
class CodeInterpreterTool(Tool[CodeInterpreterToolDefinition]):
    """
    A tool that interprets code files uploaded to the agent.

    :param file_ids: A list of file IDs to interpret.
    :type file_ids: list[str]
    """

    def __init__(self, file_ids: Optional[List[str]] = None):
        # Stored as a set so duplicate IDs collapse and removal is O(1).
        if file_ids is None:
            self.file_ids = set()
        else:
            self.file_ids = set(file_ids)

    def add_file(self, file_id: str) -> None:
        """
        Add a file ID to the list of files to interpret.

        :param file_id: The ID of the file to interpret.
        :type file_id: str
        """
        self.file_ids.add(file_id)

    def remove_file(self, file_id: str) -> None:
        """
        Remove a file ID from the list of files to interpret.

        :param file_id: The ID of the file to remove.
        :type file_id: str
        """
        self.file_ids.remove(file_id)

    @property
    def definitions(self) -> List[CodeInterpreterToolDefinition]:
        """
        Get the code interpreter tool definitions.

        :rtype: List[ToolDefinition]
        """
        return [CodeInterpreterToolDefinition()]

    @property
    def resources(self) -> ToolResources:
        """
        Get the code interpreter resources.

        :rtype: ToolResources
        """
        if not self.file_ids:
            return ToolResources()
        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids)))

    def execute(self, tool_call: Any) -> Any:
        pass


class BaseToolSet:
    """
    Abstract class for a collection of tools that can be used by an agent.
    """

    def __init__(self) -> None:
        self._tools: List[Tool] = []

    def validate_tool_type(self, tool: Tool) -> None:
        """Hook for subclasses to reject unsupported tool types; base accepts everything.

        :param Tool tool: The tool to validate.
        """

    def add(self, tool: Tool):
        """
        Add a tool to the tool set.

        :param Tool tool: The tool to add.
        :raises ValueError: If a tool of the same type already exists.
        """
        self.validate_tool_type(tool)

        if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools):
            # BUGFIX: the f-prefix was missing, so the error message contained the
            # literal text "{type(tool).__name__}" instead of the tool's class name.
            raise ValueError(f"Tool of type {type(tool).__name__} already exists in the ToolSet.")
        self._tools.append(tool)

    def remove(self, tool_type: Type[Tool]) -> None:
        """
        Remove a tool of the specified type from the tool set.

        :param Type[Tool] tool_type: The type of tool to remove.
        :raises ValueError: If a tool of the specified type is not found.
        """
        for i, tool in enumerate(self._tools):
            if isinstance(tool, tool_type):
                del self._tools[i]
                logging.info("Tool of type %s removed from the ToolSet.", tool_type.__name__)
                return
        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")

    @property
    def definitions(self) -> List[ToolDefinition]:
        """
        Get the definitions for all tools in the tool set.

        :rtype: List[ToolDefinition]
        """
        tools = []
        for tool in self._tools:
            tools.extend(tool.definitions)
        return tools

    @property
    def resources(self) -> ToolResources:
        """
        Get the resources for all tools in the tool set.

        :rtype: ToolResources
        """
        tool_resources: Dict[str, Any] = {}
        for tool in self._tools:
            resources = tool.resources
            for key, value in resources.items():
                # Merge dict-valued resources from multiple tools under the same key;
                # non-dict values from later tools overwrite earlier ones.
                if key in tool_resources:
                    if isinstance(tool_resources[key], dict) and isinstance(value, dict):
                        tool_resources[key].update(value)
                else:
                    tool_resources[key] = value
        return self._create_tool_resources_from_dict(tool_resources)

    def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources:
        """
        Safely converts a dictionary into a ToolResources instance.

        :param resources: A dictionary of tool resources. Should be a mapping
            accepted by ~azure.ai.projects.models.AzureAISearchResource
        :type resources: Dict[str, Any]
        :return: A ToolResources instance.
        :rtype: ToolResources
        """
        try:
            return ToolResources(**resources)
        except TypeError as e:
            logging.error("Error creating ToolResources: %s", e)
            raise ValueError("Invalid resources for ToolResources.") from e

    def get_definitions_and_resources(self) -> Dict[str, Any]:
        """
        Get the definitions and resources for all tools in the tool set.

        :return: A dictionary containing the tool resources and definitions.
        :rtype: Dict[str, Any]
        """
        return {
            "tool_resources": self.resources,
            "tools": self.definitions,
        }

    def get_tool(self, tool_type: Type[ToolT]) -> ToolT:
        """
        Get a tool of the specified type from the tool set.

        :param Type[Tool] tool_type: The type of tool to get.
        :return: The tool of the specified type.
        :rtype: Tool
        :raises ValueError: If a tool of the specified type is not found.
        """
        for tool in self._tools:
            if isinstance(tool, tool_type):
                return cast(ToolT, tool)
        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
class ToolSet(BaseToolSet):
    """
    A collection of tools that can be used by an synchronize agent.
    """

    def validate_tool_type(self, tool: Tool) -> None:
        """
        Validate the type of the tool.

        :param Tool tool: The type of the tool to validate.
        :raises ValueError: If the tool type is not a subclass of Tool.
        """
        if isinstance(tool, AsyncFunctionTool):
            raise ValueError(
                "AsyncFunctionTool is not supported in ToolSet. "
                "To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio."
            )

    def execute_tool_calls(self, tool_calls: List[Any]) -> Any:
        """
        Execute a tool of the specified type with the provided tool calls.

        :param List[Any] tool_calls: A list of tool calls to execute.
        :return: The output of the tool operations.
        :rtype: Any
        """
        tool_outputs = []

        for tool_call in tool_calls:
            try:
                # Only function-type calls are executable client-side.
                if tool_call.type == "function":
                    output = self.get_tool(FunctionTool).execute(tool_call)
                    tool_outputs.append({"tool_call_id": tool_call.id, "output": output})
            except Exception as e:  # pylint: disable=broad-exception-caught
                logging.error("Failed to execute tool call %s: %s", tool_call, e)

        return tool_outputs


class AsyncToolSet(BaseToolSet):
    """
    A collection of tools that can be used by an asynchronous agent.
    """

    def validate_tool_type(self, tool: Tool) -> None:
        """
        Validate the type of the tool.

        :param Tool tool: The type of the tool to validate.
        :raises ValueError: If the tool type is not a subclass of Tool.
        """
        if isinstance(tool, FunctionTool):
            raise ValueError(
                "FunctionTool is not supported in AsyncToolSet. "
                "Please use AsyncFunctionTool instead and provide sync and/or async function(s)."
            )

    async def execute_tool_calls(self, tool_calls: List[Any]) -> Any:
        """
        Execute a tool of the specified type with the provided tool calls.

        :param List[Any] tool_calls: A list of tool calls to execute.
        :return: The output of the tool operations.
        :rtype: Any
        """
        tool_outputs = []

        for tool_call in tool_calls:
            try:
                # Only function-type calls are executable client-side.
                if tool_call.type == "function":
                    output = await self.get_tool(AsyncFunctionTool).execute(tool_call)
                    tool_outputs.append({"tool_call_id": tool_call.id, "output": output})
            except Exception as e:  # pylint: disable=broad-exception-caught
                logging.error("Failed to execute tool call %s: %s", tool_call, e)

        return tool_outputs
EventFunctionReturnT = TypeVar("EventFunctionReturnT")
T = TypeVar("T")
BaseAsyncAgentEventHandlerT = TypeVar("BaseAsyncAgentEventHandlerT", bound="BaseAsyncAgentEventHandler")
BaseAgentEventHandlerT = TypeVar("BaseAgentEventHandlerT", bound="BaseAgentEventHandler")


async def async_chain(*iterators: AsyncIterator[T]) -> AsyncIterator[T]:
    """Yield items from each async iterator in sequence (async analogue of itertools.chain)."""
    for iterator in iterators:
        async for item in iterator:
            yield item


class BaseAsyncAgentEventHandler(AsyncIterator[T]):
    """Async iterator over server-sent events from an agent run response stream.

    Buffers raw bytes, splits them on the blank-line event separator ("\\n\\n"),
    and hands each complete event string to ``_process_event``.
    """

    def __init__(self) -> None:
        self.response_iterator: Optional[AsyncIterator[bytes]] = None
        self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]"], Awaitable[None]]] = (
            None
        )
        # Accumulates decoded text that has not yet formed a complete event.
        self.buffer: Optional[str] = None

    def initialize(
        self,
        response_iterator: AsyncIterator[bytes],
        submit_tool_outputs: Callable[[ThreadRun, "BaseAsyncAgentEventHandler[T]"], Awaitable[None]],
    ):
        # Chain onto any existing iterator so a re-initialized handler keeps draining
        # the previous stream before the new one.
        self.response_iterator = (
            async_chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator
        )
        self.submit_tool_outputs = submit_tool_outputs

    async def __anext__(self) -> T:
        self.buffer = "" if self.buffer is None else self.buffer
        if self.response_iterator is None:
            raise ValueError("The response handler was not initialized.")

        # Read chunks until the buffer contains at least one complete event.
        if "\n\n" not in self.buffer:
            async for chunk in self.response_iterator:
                self.buffer += chunk.decode("utf-8")
                if "\n\n" in self.buffer:
                    break

        if self.buffer == "":
            raise StopAsyncIteration()

        event_str = ""
        if "\n\n" in self.buffer:
            event_end_index = self.buffer.index("\n\n")
            event_str = self.buffer[:event_end_index]
            self.buffer = self.buffer[event_end_index:].lstrip()
        else:
            # Stream ended with a partial trailing event; emit what we have.
            event_str = self.buffer
            self.buffer = ""

        return await self._process_event(event_str)

    async def _process_event(self, event_data_str: str) -> T:
        raise NotImplementedError("This method needs to be implemented.")

    async def until_done(self) -> None:
        """
        Iterates through all events until the stream is marked as done.
        Calls the provided callback function with each event data.
        """
        try:
            async for _ in self:
                pass
        except StopAsyncIteration:
            pass


class BaseAgentEventHandler(Iterator[T]):
    """Synchronous counterpart of BaseAsyncAgentEventHandler; same buffering/split logic."""

    def __init__(self) -> None:
        self.response_iterator: Optional[Iterator[bytes]] = None
        self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAgentEventHandler[T]"], None]] = None
        # Accumulates decoded text that has not yet formed a complete event.
        self.buffer: Optional[str] = None

    def initialize(
        self,
        response_iterator: Iterator[bytes],
        submit_tool_outputs: Callable[[ThreadRun, "BaseAgentEventHandler[T]"], None],
    ) -> None:
        # Chain onto any existing iterator so a re-initialized handler keeps draining
        # the previous stream before the new one.
        self.response_iterator = (
            itertools.chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator
        )
        self.submit_tool_outputs = submit_tool_outputs

    def __next__(self) -> T:
        self.buffer = "" if self.buffer is None else self.buffer
        if self.response_iterator is None:
            raise ValueError("The response handler was not initialized.")

        # Read chunks until the buffer contains at least one complete event.
        if "\n\n" not in self.buffer:
            for chunk in self.response_iterator:
                self.buffer += chunk.decode("utf-8")
                if "\n\n" in self.buffer:
                    break

        if self.buffer == "":
            raise StopIteration()

        event_str = ""
        if "\n\n" in self.buffer:
            event_end_index = self.buffer.index("\n\n")
            event_str = self.buffer[:event_end_index]
            self.buffer = self.buffer[event_end_index:].lstrip()
        else:
            # Stream ended with a partial trailing event; emit what we have.
            event_str = self.buffer
            self.buffer = ""

        return self._process_event(event_str)

    def _process_event(self, event_data_str: str) -> T:
        raise NotImplementedError("This method needs to be implemented.")

    def until_done(self) -> None:
        """
        Iterates through all events until the stream is marked as done.
        Calls the provided callback function with each event data.
        """
        try:
            for _ in self:
                pass
        except StopIteration:
            pass
class AsyncAgentEventHandler(BaseAsyncAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]):
    """Dispatches parsed agent stream events to overridable async ``on_*`` callbacks."""

    async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]:
        event_type, event_data_obj = _parse_event(event_data_str)
        # A run that requires action must submit tool outputs before iteration continues.
        if (
            isinstance(event_data_obj, ThreadRun)
            and event_data_obj.status == "requires_action"
            and isinstance(event_data_obj.required_action, SubmitToolOutputsAction)
        ):
            await cast(Callable[[ThreadRun, "BaseAsyncAgentEventHandler"], Awaitable[None]], self.submit_tool_outputs)(
                event_data_obj, self
            )

        func_rt: Optional[EventFunctionReturnT] = None
        try:
            if isinstance(event_data_obj, MessageDeltaChunk):
                func_rt = await self.on_message_delta(event_data_obj)
            elif isinstance(event_data_obj, ThreadMessage):
                func_rt = await self.on_thread_message(event_data_obj)
            elif isinstance(event_data_obj, ThreadRun):
                func_rt = await self.on_thread_run(event_data_obj)
            elif isinstance(event_data_obj, RunStep):
                func_rt = await self.on_run_step(event_data_obj)
            elif isinstance(event_data_obj, RunStepDeltaChunk):
                func_rt = await self.on_run_step_delta(event_data_obj)
            elif event_type == AgentStreamEvent.ERROR:
                func_rt = await self.on_error(event_data_obj)
            elif event_type == AgentStreamEvent.DONE:
                func_rt = await self.on_done()
            else:
                func_rt = await self.on_unhandled_event(
                    event_type, event_data_obj
                )  # pylint: disable=assignment-from-none
        except Exception as e:  # pylint: disable=broad-exception-caught
            logging.error("Error in event handler for event '%s': %s", event_type, e)
        return event_type, event_data_obj, func_rt

    async def on_message_delta(
        self, delta: "MessageDeltaChunk"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle message delta events.

        :param MessageDeltaChunk delta: The message delta.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_thread_message(
        self, message: "ThreadMessage"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle thread message events.

        :param ThreadMessage message: The thread message.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_thread_run(
        self, run: "ThreadRun"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle thread run events.

        :param ThreadRun run: The thread run.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]:  # pylint: disable=unused-argument
        """Handle run step events.

        :param RunStep step: The run step.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_run_step_delta(
        self, delta: "RunStepDeltaChunk"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle run step delta events.

        :param RunStepDeltaChunk delta: The run step delta.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_error(self, data: str) -> Optional[EventFunctionReturnT]:  # pylint: disable=unused-argument
        """Handle error events.

        :param str data: The error event's data.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_done(
        self,
    ) -> Optional[EventFunctionReturnT]:
        """Handle the completion of the stream.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    async def on_unhandled_event(
        self, event_type: str, event_data: str  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle any unhandled event types.

        :param str event_type: The event type.
        :param Any event_data: The event's data.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None


class AgentEventHandler(BaseAgentEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]):
    """Dispatches parsed agent stream events to overridable synchronous ``on_*`` callbacks."""

    def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]:

        event_type, event_data_obj = _parse_event(event_data_str)
        # A run that requires action must submit tool outputs before iteration continues.
        if (
            isinstance(event_data_obj, ThreadRun)
            and event_data_obj.status == "requires_action"
            and isinstance(event_data_obj.required_action, SubmitToolOutputsAction)
        ):
            # BUGFIX: the cast previously typed this synchronous callback as returning
            # Awaitable[None] (copied from the async handler); it is called, not awaited.
            cast(Callable[[ThreadRun, "BaseAgentEventHandler"], None], self.submit_tool_outputs)(
                event_data_obj, self
            )

        func_rt: Optional[EventFunctionReturnT] = None
        try:
            if isinstance(event_data_obj, MessageDeltaChunk):
                func_rt = self.on_message_delta(event_data_obj)  # pylint: disable=assignment-from-none
            elif isinstance(event_data_obj, ThreadMessage):
                func_rt = self.on_thread_message(event_data_obj)  # pylint: disable=assignment-from-none
            elif isinstance(event_data_obj, ThreadRun):
                func_rt = self.on_thread_run(event_data_obj)  # pylint: disable=assignment-from-none
            elif isinstance(event_data_obj, RunStep):
                func_rt = self.on_run_step(event_data_obj)  # pylint: disable=assignment-from-none
            elif isinstance(event_data_obj, RunStepDeltaChunk):
                func_rt = self.on_run_step_delta(event_data_obj)  # pylint: disable=assignment-from-none
            elif event_type == AgentStreamEvent.ERROR:
                func_rt = self.on_error(event_data_obj)  # pylint: disable=assignment-from-none
            elif event_type == AgentStreamEvent.DONE:
                func_rt = self.on_done()  # pylint: disable=assignment-from-none
            else:
                func_rt = self.on_unhandled_event(event_type, event_data_obj)  # pylint: disable=assignment-from-none
        except Exception as e:  # pylint: disable=broad-exception-caught
            logging.error("Error in event handler for event '%s': %s", event_type, e)
        return event_type, event_data_obj, func_rt

    def on_message_delta(
        self, delta: "MessageDeltaChunk"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle message delta events.

        :param MessageDeltaChunk delta: The message delta.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_thread_message(
        self, message: "ThreadMessage"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle thread message events.

        :param ThreadMessage message: The thread message.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_thread_run(self, run: "ThreadRun") -> Optional[EventFunctionReturnT]:  # pylint: disable=unused-argument
        """Handle thread run events.

        :param ThreadRun run: The thread run.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]:  # pylint: disable=unused-argument
        """Handle run step events.

        :param RunStep step: The run step.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_run_step_delta(
        self, delta: "RunStepDeltaChunk"  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle run step delta events.

        :param RunStepDeltaChunk delta: The run step delta.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_error(self, data: str) -> Optional[EventFunctionReturnT]:  # pylint: disable=unused-argument
        """Handle error events.

        :param str data: The error event's data.
        :rtype: Optional[EventFunctionReturnT]
        """
        return None

    def on_done(
        self,
    ) -> Optional[EventFunctionReturnT]:
        """Handle the completion of the stream."""
        return None

    def on_unhandled_event(
        self, event_type: str, event_data: str  # pylint: disable=unused-argument
    ) -> Optional[EventFunctionReturnT]:
        """Handle any unhandled event types.

        :param str event_type: The event type.
        :param Any event_data: The event's data.
        """
        return None
- """ - return None - - -class AsyncAgentRunStream(Generic[BaseAsyncAgentEventHandlerT]): - def __init__( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, BaseAsyncAgentEventHandlerT], Awaitable[None]], - event_handler: BaseAsyncAgentEventHandlerT, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.submit_tool_outputs = submit_tool_outputs - self.event_handler.initialize( - self.response_iterator, - cast(Callable[[ThreadRun, BaseAsyncAgentEventHandler], Awaitable[None]], submit_tool_outputs), - ) - - async def __aenter__(self): - return self.event_handler - - async def __aexit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - result = close_method() - if asyncio.iscoroutine(result): - await result - - -class AgentRunStream(Generic[BaseAgentEventHandlerT]): - def __init__( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, BaseAgentEventHandlerT], None], - event_handler: BaseAgentEventHandlerT, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.submit_tool_outputs = submit_tool_outputs - self.event_handler.initialize( - self.response_iterator, - cast(Callable[[ThreadRun, BaseAgentEventHandler], None], submit_tool_outputs), - ) - - def __enter__(self): - return self.event_handler - - def __exit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - close_method() - - -class OpenAIPageableListOfThreadMessage(OpenAIPageableListOfThreadMessageGenerated): - - @property - def text_messages(self) -> List[MessageTextContent]: - """Returns all text message contents in the messages. 
- - :rtype: List[MessageTextContent] - """ - texts = [content for msg in self.data for content in msg.text_messages] - return texts - - @property - def image_contents(self) -> List[MessageImageFileContent]: - """Returns all image file contents from image message contents in the messages. - - :rtype: List[MessageImageFileContent] - """ - return [content for msg in self.data for content in msg.image_contents] - - @property - def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: - """Returns all file citation annotations from text message annotations in the messages. - - :rtype: List[MessageTextFileCitationAnnotation] - """ - annotations = [annotation for msg in self.data for annotation in msg.file_citation_annotations] - return annotations - - @property - def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: - """Returns all file path annotations from text message annotations in the messages. - - :rtype: List[MessageTextFilePathAnnotation] - """ - annotations = [annotation for msg in self.data for annotation in msg.file_path_annotations] - return annotations - - def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage]: - """Returns the last message from a sender in the specified role. - - :param role: The role of the sender. - :type role: MessageRole - - :return: The last message from a sender in the specified role. - :rtype: ~azure.ai.projects.models.ThreadMessage - """ - for msg in self.data: - if msg.role == role: - return msg - return None - - def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTextContent]: - """Returns the last text message from a sender in the specified role. - - :param role: The role of the sender. - :type role: MessageRole - - :return: The last text message from a sender in the specified role. 
- :rtype: ~azure.ai.projects.models.MessageTextContent - """ - for msg in self.data: - if msg.role == role: - for content in msg.content: - if isinstance(content, MessageTextContent): - return content - return None - - -__all__: List[str] = [ - "AgentEventHandler", - "AgentRunStream", - "AsyncAgentRunStream", - "AsyncFunctionTool", - "AsyncToolSet", - "AzureAISearchTool", - "AzureFunctionTool", - "BaseAsyncAgentEventHandler", - "BaseAgentEventHandler", - "CodeInterpreterTool", - "ConnectionProperties", - "AsyncAgentEventHandler", - "OpenAIPageableListOfThreadMessage", - "FileSearchTool", - "FunctionTool", - "OpenApiTool", - "BingGroundingTool", - "StreamEventData", - "SharepointTool", - "FabricTool", - "AzureAISearchTool", - "SASTokenCredential", - "Tool", - "ToolSet", - "BaseAsyncAgentEventHandlerT", - "BaseAgentEventHandlerT", - "ThreadMessage", - "MessageTextFileCitationAnnotation", - "MessageDeltaChunk", - "MessageAttachment", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index ef27ce1eca4c..bfd1e05f88df 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -107,7 +107,7 @@ def build_agents_list_agents_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: +def build_agents_get_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -115,9 +115,9 @@ def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequ accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/assistants/{assistantId}" + _url = "/assistants/{agentId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + "agentId": _SERIALIZER.url("agent_id", agent_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -131,7 +131,7 @@ def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: +def build_agents_update_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -140,9 +140,9 @@ def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpR accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/assistants/{assistantId}" + _url = "/assistants/{agentId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + "agentId": _SERIALIZER.url("agent_id", agent_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -158,7 +158,7 @@ def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpR return HttpRequest(method="POST", 
url=_url, params=_params, headers=_headers, **kwargs) -def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: +def build_agents_delete_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -166,9 +166,9 @@ def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpR accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/assistants/{assistantId}" + _url = "/assistants/{agentId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + "agentId": _SERIALIZER.url("agent_id", agent_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -1858,11 +1858,11 @@ def list_agents( return deserialized # type: ignore @distributed_trace - def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: """Retrieves an existing agent. - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str + :param agent_id: Identifier of the agent. Required. + :type agent_id: str :return: Agent. 
The Agent is compatible with MutableMapping :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: @@ -1881,7 +1881,7 @@ def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: cls: ClsType[_models.Agent] = kwargs.pop("cls", None) _request = build_agents_get_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1925,7 +1925,7 @@ def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: @overload def update_agent( self, - assistant_id: str, + agent_id: str, *, content_type: str = "application/json", model: Optional[str] = None, @@ -1942,8 +1942,8 @@ def update_agent( ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1993,12 +1993,12 @@ def update_agent( @overload def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
@@ -2011,12 +2011,12 @@ def update_agent( @overload def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -2030,7 +2030,7 @@ def update_agent( @distributed_trace def update_agent( self, - assistant_id: str, + agent_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, model: Optional[str] = None, @@ -2047,8 +2047,8 @@ def update_agent( ) -> _models.Agent: """Modifies an existing agent. - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Default value is None. @@ -2130,7 +2130,7 @@ def update_agent( _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_agents_update_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2174,11 +2174,11 @@ def update_agent( return deserialized # type: ignore @distributed_trace - def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: """Deletes an agent. - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str + :param agent_id: Identifier of the agent. Required. 
+ :type agent_id: str :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping :rtype: ~azure.ai.projects.models.AgentDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: @@ -2197,7 +2197,7 @@ def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletio cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) _request = build_agents_delete_agent_request( - assistant_id=assistant_id, + agent_id=agent_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3234,7 +3234,7 @@ def create_run( self, thread_id: str, *, - assistant_id: str, + agent_id: str, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, @@ -3258,8 +3258,8 @@ def create_run( :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -3406,7 +3406,7 @@ def create_run( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - assistant_id: str = _Unset, + agent_id: str = _Unset, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -3431,8 +3431,8 @@ def create_run( :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent that should run the thread. Required. 
+ :paramtype agent_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -3526,12 +3526,12 @@ def create_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") body = { "additional_instructions": additional_instructions, "additional_messages": additional_messages, - "assistant_id": assistant_id, + "assistant_id": agent_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -4176,7 +4176,7 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre def create_thread_and_run( self, *, - assistant_id: str, + agent_id: str, content_type: str = "application/json", thread: Optional[_models.AgentThreadCreationOptions] = None, model: Optional[str] = None, @@ -4197,8 +4197,8 @@ def create_thread_and_run( ) -> _models.ThreadRun: """Creates a new agent thread and immediately starts a run using that new thread. - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -4312,7 +4312,7 @@ def create_thread_and_run( self, body: Union[JSON, IO[bytes]] = _Unset, *, - assistant_id: str = _Unset, + agent_id: str = _Unset, thread: Optional[_models.AgentThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -4334,8 +4334,8 @@ def create_thread_and_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions @@ -4423,10 +4423,10 @@ def create_thread_and_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") body = { - "assistant_id": assistant_id, + "assistant_id": agent_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -6553,7 +6553,8 @@ def _list_connections( """List the details of all the connections (not including their credentials). :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". Default value is None. + "Serverless", "AzureBlob", "AIServices", "CognitiveSearch", and "ApiKey". Default value is + None. :paramtype category: str or ~azure.ai.projects.models.ConnectionType :keyword include_all: Indicates whether to list datastores. Service default: do not list datastores. Default value is None. 
@@ -6813,7 +6814,6 @@ def __init__(self, *args, **kwargs): def _get_app_insights( self, app_insights_resource_url: str, **kwargs: Any ) -> _models._models.GetAppInsightsResponse: - # pylint: disable=line-too-long """Gets the properties of the specified Application Insights resource. :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index a7692690bcde..f7dd32510333 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -7,3314 +6,9 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import io -import logging -import os -import sys -import time -from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - Dict, - Iterator, - List, - Optional, - Sequence, - TextIO, - Union, - cast, - overload, -) +from typing import List -from azure.core.exceptions import ResourceNotFoundError -from azure.core.tracing.decorator import distributed_trace - -from .. 
import models as _models -from .._vendor import FileType -from ..models._enums import AuthenticationType, ConnectionType, FilePurpose, RunStatus -from ..models._models import ( - GetAppInsightsResponse, - GetConnectionResponse, - GetWorkspaceResponse, - InternalConnectionPropertiesSASAuth, - ListConnectionsResponse, -) -from ..models._patch import ConnectionProperties -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from openai import AzureOpenAI - - from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient - - from .. import _types - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -class InferenceOperations: - - def __init__(self, outer_instance): - - # All returned inference clients will have this application id set on their user-agent. - # For more info on user-agent HTTP header, see: - # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy - USER_AGENT_APP_ID = "AIProjectClient" - - if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent: - # If the calling application has set "user_agent" when constructing the AIProjectClient, - # take that value and prepend it to USER_AGENT_APP_ID. 
- self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}" - else: - self._user_agent = USER_AGENT_APP_ID - - self._outer_instance = outer_instance - - @distributed_trace - def get_chat_completions_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ChatCompletionsClient": - """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - ChatCompletionsClient. - - At least one AI model that supports chat completions must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated chat completions client. - :rtype: ~azure.ai.inference.ChatCompletionsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. 
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import ChatCompletionsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using Entra ID authentication" - ) - client = ChatCompletionsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), 
- **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_chat_completions_client] " - + "Creating ChatCompletionsClient using SAS authentication" - ) - raise ValueError( - "Getting chat completions client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> "EmbeddingsClient": - """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - EmbeddingsClient. - - At least one AI model that supports text embeddings must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated text embeddings client - :rtype: ~azure.ai.inference.EmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. 
- :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import EmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = EmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" - ) - client = EmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" - ) - raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_image_embeddings_client( - self, *, connection_name: Optional[str] = None, **kwargs - ) -> "ImageEmbeddingsClient": - """Get an authenticated ImageEmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. Keyword arguments are passed to the constructor of - ImageEmbeddingsClient. 
- - At least one AI model that supports image embeddings must be deployed in this resource. - - .. note:: The package `azure-ai-inference` must be installed prior to calling this method. - - :keyword connection_name: The name of a connection to an Azure AI Services resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure AI Services connection will be used. - :type connection_name: str - - :return: An authenticated image embeddings client - :rtype: ~azure.ai.inference.ImageEmbeddingsClient - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure AI Services connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `azure-ai-inference` package - is not installed. - :raises ValueError: if the connection name is an empty string. - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on - # a separate "Serverless" connection. This is now deprecated. 
- use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" - - if connection_name: - connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) - else: - if use_serverless_connection: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True - ) - - logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) - - try: - from azure.ai.inference import ImageEmbeddingsClient - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) from e - - if use_serverless_connection: - endpoint = connection.endpoint_url - credential_scopes = ["https://ml.azure.com/.default"] - else: - endpoint = f"{connection.endpoint_url}/models" - credential_scopes = ["https://cognitiveservices.azure.com/.default"] - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=AzureKeyCredential(connection.key), - user_agent=kwargs.pop("user_agent", self._user_agent), - **kwargs, - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using Entra ID authentication" - ) - client = ImageEmbeddingsClient( - endpoint=endpoint, - credential=connection.token_credential, - credential_scopes=credential_scopes, - user_agent=kwargs.pop("user_agent", self._user_agent), - 
**kwargs, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_image_embeddings_client] " - "Creating ImageEmbeddingsClient using SAS authentication" - ) - raise ValueError( - "Getting image embeddings client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_azure_openai_client( - self, *, api_version: Optional[str] = None, connection_name: Optional[str] = None, **kwargs - ) -> "AzureOpenAI": - """Get an authenticated AzureOpenAI client (from the `openai` package) for the default - Azure OpenAI connection (if `connection_name` is not specificed), or from the Azure OpenAI - resource given by its connection name. - - .. note:: The package `openai` must be installed prior to calling this method. - - :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. - See "Data plane - Inference" row in the table at - https://learn.microsoft.com/azure/ai-services/openai/reference#api-specs. If this keyword - is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. - :paramtype api_version: str - :keyword connection_name: The name of a connection to an Azure OpenAI resource in your AI Foundry project. - resource. Optional. If not provided, the default Azure OpenAI connection will be used. - :type connection_name: str - - :return: An authenticated AzureOpenAI client - :rtype: ~openai.AzureOpenAI - - :raises ~azure.core.exceptions.ResourceNotFoundError: if an Azure OpenAI connection - does not exist. - :raises ~azure.core.exceptions.ModuleNotFoundError: if the `openai` package - is not installed. - :raises ValueError: if the connection name is an empty string. 
- :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - - if connection_name is not None and not connection_name: - raise ValueError("Connection name cannot be empty") - - if connection_name: - connection = self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) - else: - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs - ) - - logger.debug("[InferenceOperations.get_azure_openai_client] connection = %s", str(connection)) - - try: - from openai import AzureOpenAI - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenAI SDK is not installed. Please install it using 'pip install openai'" - ) from e - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" - ) - client = AzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version - ) - elif connection.authentication_type == AuthenticationType.ENTRA_ID: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using Entra ID authentication" - ) - try: - from azure.identity import get_bearer_token_provider - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "azure.identity package not installed. 
Please install it using 'pip install azure.identity'" - ) from e - client = AzureOpenAI( - # See https://learn.microsoft.com/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider # pylint: disable=line-too-long - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug( - "[InferenceOperations.get_azure_openai_client] " + "Creating AzureOpenAI using SAS authentication" - ) - raise ValueError( - "Getting an AzureOpenAI client from a connection with SAS authentication is not yet supported" - ) - else: - raise ValueError("Unknown authentication type") - - return client - - -class ConnectionsOperations(ConnectionsOperationsGenerated): - - @distributed_trace - def get_default( - self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if there are no connections of the given type. - - .. note:: - `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not - currently work. It does work with `include_credentials=False`. - - :keyword connection_type: The connection type. Required. - :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :type include_credentials: bool - :return: The connection properties. 
- :rtype: ~azure.ai.projects.models.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_type: - raise ValueError("You must specify an connection type") - # Since there is no notion of default connection at the moment, list all connections in the category - # and return the first one (index 0), unless overridden by the environment variable DEFAULT_CONNECTION_INDEX. - connection_properties_list = self.list(connection_type=connection_type, **kwargs) - if len(connection_properties_list) > 0: - default_connection_index = int(os.getenv("DEFAULT_CONNECTION_INDEX", "0")) - if include_credentials: - return self.get( - connection_name=connection_properties_list[default_connection_index].name, - include_credentials=include_credentials, - **kwargs, - ) - return connection_properties_list[default_connection_index] - raise ResourceNotFoundError(f"No connection of type {connection_type} found") - - @distributed_trace - def get(self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: - """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if a connection with the given name was not found. - - .. note:: This method is not supported for Azure Blob Storage connections. - - :keyword connection_name: Connection Name. Required. - :type connection_name: str - :keyword include_credentials: Whether to populate the connection properties with authentication credentials. - Optional. - :type include_credentials: bool - :return: The connection properties, or `None` if a connection with this name does not exist. 
- :rtype: ~azure.ai.projects.models.ConnectionProperties - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_name: - raise ValueError("Connection name cannot be empty") - if include_credentials: - connection: GetConnectionResponse = self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - if connection.properties.auth_type == AuthenticationType.ENTRA_ID: - return ConnectionProperties(connection=connection, token_credential=self._config.credential) - if connection.properties.auth_type == AuthenticationType.SAS: - from ..models._patch import SASTokenCredential - - cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) - - token_credential = SASTokenCredential( - sas_token=cred_prop.credentials.sas, - credential=self._config.credential, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - project_name=self._config.project_name, - connection_name=connection_name, - ) - return ConnectionProperties(connection=connection, token_credential=token_credential) - - return ConnectionProperties(connection=connection) - connection = self._get_connection(connection_name=connection_name, **kwargs) - return ConnectionProperties(connection=connection) - - @distributed_trace - def list( - self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any - ) -> Sequence[ConnectionProperties]: - """List the properties of all connections, or all connections of a certain connection type. - - :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this - type. If not provided, all connections are listed. 
- :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :return: A list of connection properties - :rtype: Sequence[~azure.ai.projects.models._models.ConnectionProperties] - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connections_list: ListConnectionsResponse = self._list_connections( - include_all=True, category=connection_type, **kwargs - ) - - # Iterate to create the simplified result property - connection_properties_list: List[ConnectionProperties] = [] - for connection in connections_list.value: - connection_properties_list.append(ConnectionProperties(connection=connection)) - - return connection_properties_list - - -# Internal helper functions to enable OpenTelemetry, used by both sync and async clients -def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: - if isinstance(destination, str): - # `destination` is the OTLP endpoint - # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage - try: - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry OTLP exporter is not installed. " - + "Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" - ) from e - return OTLPSpanExporter(endpoint=destination) - - if isinstance(destination, io.TextIOWrapper): - if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long - try: - from opentelemetry.sdk.trace.export import ConsoleSpanExporter - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry SDK is not installed. 
Please install it using 'pip install opentelemetry-sdk'" - ) from e - - return ConsoleSpanExporter() - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") - - return None - - -def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: - if isinstance(destination, str): - # `destination` is the OTLP endpoint - # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage - try: - # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. - # So it's ok to use it for local development, but we'll swallow - # any errors in case of any breaking changes on OTel side. - from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore # pylint: disable=import-error,no-name-in-module - except Exception as ex: # pylint: disable=broad-exception-caught - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. - logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - return None - - return OTLPLogExporter(endpoint=destination) - - if isinstance(destination, io.TextIOWrapper): - if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long - try: - from opentelemetry.sdk._logs.export import ConsoleLogExporter - - return ConsoleLogExporter() - except ModuleNotFoundError as ex: - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. 
- logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - return None - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") - - return None - - -def _configure_tracing(span_exporter: Any) -> None: - if span_exporter is None: - return - - try: - from opentelemetry import trace - from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import SimpleSpanProcessor - except ModuleNotFoundError as e: - raise ModuleNotFoundError( - "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) from e - - # if tracing was not setup before, we need to create a new TracerProvider - if not isinstance(trace.get_tracer_provider(), TracerProvider): - # If the provider is NoOpTracerProvider, we need to create a new TracerProvider - provider = TracerProvider() - trace.set_tracer_provider(provider) - - # get_tracer_provider returns opentelemetry.trace.TracerProvider - # however, we have opentelemetry.sdk.trace.TracerProvider, which implements - # add_span_processor method, though we need to cast it to fix type checking. - provider = cast(TracerProvider, trace.get_tracer_provider()) - provider.add_span_processor(SimpleSpanProcessor(span_exporter)) - - -def _configure_logging(log_exporter: Any) -> None: - if log_exporter is None: - return - - try: - # _events and _logs are considered beta (not internal) in - # OpenTelemetry Python API/SDK. - # So it's ok to use them for local development, but we'll swallow - # any errors in case of any breaking changes on OTel side. 
- from opentelemetry import _logs, _events - from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module - from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module - from opentelemetry.sdk._logs.export import ( - SimpleLogRecordProcessor, - ) # pylint: disable=import-error,no-name-in-module - - if not isinstance(_logs.get_logger_provider(), LoggerProvider): - logger_provider = LoggerProvider() - _logs.set_logger_provider(logger_provider) - - # get_logger_provider returns opentelemetry._logs.LoggerProvider - # however, we have opentelemetry.sdk._logs.LoggerProvider, which implements - # add_log_record_processor method, though we need to cast it to fix type checking. - logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) - logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) - _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) - except Exception as ex: # pylint: disable=broad-exception-caught - # since OTel logging is still in beta in Python, we're going to swallow any errors - # and just warn about them. - logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) - - -def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: # pylint: disable=unused-argument - """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. - - :param destination: `sys.stdout` to print telemetry to console or a string holding the - OpenTelemetry protocol (OTLP) endpoint. - If not provided, this method enables instrumentation, but does not configure OpenTelemetry - SDK to export traces and logs. 
- :type destination: Union[TextIO, str, None] - """ - span_exporter = _get_trace_exporter(destination) - _configure_tracing(span_exporter) - - log_exporter = _get_log_exporter(destination) - _configure_logging(log_exporter) - - # Silently try to load a set of relevant Instrumentors - try: - from azure.core.settings import settings - - settings.tracing_implementation = "opentelemetry" - except ModuleNotFoundError: - logger.warning( - "Azure SDK tracing plugin is not installed. " - + "Please install it using 'pip install azure-core-tracing-opentelemetry'" - ) - - try: - from azure.ai.inference.tracing import AIInferenceInstrumentor # type: ignore - - inference_instrumentor = AIInferenceInstrumentor() - if not inference_instrumentor.is_instrumented(): - inference_instrumentor.instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed" - ) - - try: - from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor - - agents_instrumentor = AIAgentsInstrumentor() - if not agents_instrumentor.is_instrumented(): - agents_instrumentor.instrument() - except Exception as exc: # pylint: disable=broad-exception-caught - logger.warning("Could not call `AIAgentsInstrumentor().instrument()`", exc_info=exc) - - try: - from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor # type: ignore - - OpenAIInstrumentor().instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call `OpenAIInstrumentor().instrument()` since " - + "`opentelemetry-instrumentation-openai-v2` is not installed" - ) - - try: - from opentelemetry.instrumentation.langchain import LangchainInstrumentor # type: ignore - - LangchainInstrumentor().instrument() - except ModuleNotFoundError: - logger.warning( - "Could not call LangchainInstrumentor().instrument()` since " - + "`opentelemetry-instrumentation-langchain` is not installed" - ) - - -class 
TelemetryOperations(TelemetryOperationsGenerated): - - _connection_string: Optional[str] = None - - def __init__(self, *args, **kwargs): - self._outer_instance = kwargs.pop("outer_instance") - super().__init__(*args, **kwargs) - - def get_connection_string(self) -> str: - """Get the Application Insights connection string associated with the Project's Application Insights resource. - - :return: The Application Insights connection string if a the resource was enabled for the Project. - :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError: An Application Insights resource was not - enabled for this project. - """ - if not self._connection_string: - # Get the AI Foundry project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = ( - self._outer_instance.connections._get_workspace() # pylint: disable=protected-access - ) - - if not get_workspace_response.properties.application_insights: - raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") - - # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = self._get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) - - self._connection_string = app_insights_respose.properties.connection_string - - return self._connection_string - - # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? - # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: - """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. 
- - Following instrumentations are enabled (when corresponding packages are installed): - - - Azure AI Inference (`azure-ai-inference`) - - Azure AI Projects (`azure-ai-projects`) - - OpenAI (`opentelemetry-instrumentation-openai-v2`) - - Langchain (`opentelemetry-instrumentation-langchain`) - - The recording of prompt and completion messages is disabled by default. To enable it, set the - `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to `true`. - - When destination is provided, the method configures OpenTelemetry SDK to export traces to - stdout or OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local - development only. For production use, make sure to configure OpenTelemetry SDK directly. - - :keyword destination: Recommended for local testing only. Set it to `sys.stdout` for - tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) - endpoint such as "http://localhost:4317. - If not provided, the method enables instrumentations, but does not configure OpenTelemetry - SDK to export traces. 
- :paramtype destination: Union[TextIO, str, None] - """ - _enable_telemetry(destination=destination, **kwargs) - - -class AgentsOperations(AgentsOperationsGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - self._toolset: Dict[str, _models.ToolSet] = {} - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. 
- :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def create_agent( # pylint: disable=arguments-differ - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. 
- So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :type body: Union[JSON, IO[bytes]] - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. - :paramtype name: Optional[str] - :keyword description: A description for the new agent. - :paramtype description: Optional[str] - :keyword instructions: System instructions for the agent. - :paramtype instructions: Optional[str] - :keyword tools: List of tools definitions for the agent. - :paramtype tools: Optional[List[_models.ToolDefinition]] - :keyword tool_resources: Resources used by the agent's tools. - :paramtype tool_resources: Optional[_models.ToolResources] - :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :paramtype toolset: Optional[_models.ToolSet] - :keyword temperature: Sampling temperature for generating agent responses. - :paramtype temperature: Optional[float] - :keyword top_p: Nucleus sampling parameter. 
- :paramtype top_p: Optional[float] - :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.AgentsApiResponseFormatOption"] - :keyword metadata: Key/value pairs for storing additional information. - :paramtype metadata: Optional[Dict[str, str]] - :keyword content_type: Content type of the body. - :paramtype content_type: str - :return: An Agent object. - :rtype: _models.Agent - :raises: HttpResponseError for HTTP errors. - """ - - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().create_agent(body=body, content_type=content_type, **kwargs) - return super().create_agent(body=body, **kwargs) - - if toolset is not None: - tools = toolset.definitions - tool_resources = toolset.resources - - new_agent = super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - if toolset is not None: - self._toolset[new_agent.id] = toolset - return new_agent - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. 
- :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. 
Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def update_agent( # pylint: disable=arguments-differ - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. 
- :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_agent( - self, - assistant_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. 
- :paramtype tool_resources: ~azure.ai.projects.models.ToolResources - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - self._validate_tools_and_tool_resources(tools, tool_resources) - - if body is not _Unset: - if isinstance(body, io.IOBase): - return super().update_agent(body=body, content_type=content_type, **kwargs) - return super().update_agent(body=body, **kwargs) - - if toolset is not None: - self._toolset[assistant_id] = toolset - tools = toolset.definitions - tool_resources = toolset.resources - - return super().update_agent( - assistant_id=assistant_id, - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def _validate_tools_and_tool_resources( - self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] - ): - if tool_resources is None: - return - if tools is None: - tools = [] - - if tool_resources.file_search is not None and not any( - isinstance(tool, _models.FileSearchToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" - ) - if tool_resources.code_interpreter is not None and not any( - isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools - ): - raise ValueError( - "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" - ) - - # pylint: disable=arguments-differ - @overload - def create_run( # pylint: disable=arguments-differ - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - 
additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. 
- :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. 
- :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. 
- :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - body: IO[bytes], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. 
- :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. 
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @distributed_trace - def create_and_process_run( - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. 
- :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword toolset: The Collection of tools and resources (alternative to `tools` and - `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. 
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.projects.models.AgentsApiResponseFormatMode or - ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :keyword sleep_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype sleep_interval: int - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - # Create and initiate the run with additional parameters - run = self.create_run( - thread_id=thread_id, - include=include, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=toolset.definitions if toolset else None, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - # Monitor and process the run status - while run.status in [ - RunStatus.QUEUED, - RunStatus.IN_PROGRESS, - RunStatus.REQUIRES_ACTION, - ]: - time.sleep(sleep_interval) - run = self.get_run(thread_id=thread_id, run_id=run.id) - - if run.status == RunStatus.REQUIRES_ACTION and isinstance( - run.required_action, _models.SubmitToolOutputsAction - ): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logging.warning("No tool calls provided - cancelling run") - self.cancel_run(thread_id=thread_id, run_id=run.id) - break - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = toolset or self._toolset.get(run.assistant_id) - if toolset is not None: - tool_outputs = toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - logging.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) - - logging.info("Current run status: %s", run.status) - - return run - - @overload - def create_stream( - self, - thread_id: str, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: None = None, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.AgentEventHandler]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. 
- :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. 
- Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: None - :paramtype event_handler: None. _models.AgentEventHandler will be applied as default. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_stream( - self, - thread_id: str, - *, - assistant_id: str, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: _models.BaseAgentEventHandlerT, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new stream for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. 
- :paramtype assistant_id: str - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - event_handler: None = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AgentRunStream[_models.AgentEventHandler]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: None - :paramtype event_handler: None. 
_models.AgentEventHandler will be applied as default. - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_stream( - self, - thread_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAgentEventHandlerT, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - parallel_tool_calls: Optional[bool] = None, - metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.BaseAgentEventHandlerT] = None, - **kwargs: Any, - ) -> _models.AgentRunStream[_models.BaseAgentEventHandlerT]: - """Creates a new run for an agent thread. - - Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword assistant_id: The ID of the agent that should run the thread. Required. 
- :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode - or ~azure.ai.projects.models.AgentsApiResponseFormat - :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. - Default value is None. - :paramtype parallel_tool_calls: bool - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - include=include, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
- content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - if not event_handler: - event_handler = cast(_models.BaseAgentEventHandlerT, _models.AgentEventHandler()) - return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - # pylint: disable=arguments-differ - @overload - def submit_tool_outputs_to_run( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs_to_run( - thread_id, - run_id, - tool_outputs=tool_outputs, - stream_parameter=False, - stream=False, - **kwargs, - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - return response - - @overload - def submit_tool_outputs_to_stream( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]], - *, - event_handler: _models.BaseAgentEventHandler, - content_type: str = 
"application/json", - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_stream( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. 
- :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload] - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: _models.BaseAgentEventHandler, - **kwargs: Any, - ) -> None: - """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword event_handler: The event handler to use for processing events during the run. 
- :paramtype event_handler: ~azure.ai.projects.models.BaseAgentEventHandler - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs_to_run( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # Cast the response to Iterator[bytes] for type correctness - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) - - def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _models.BaseAgentEventHandler) -> None: - if isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.debug("No tool calls to execute.") - return - - # We need tool set only if we are executing local function. In case if - # the tool is azure_function we just need to wait when it will be finished. 
- if any(tool_call.type == "function" for tool_call in tool_calls): - toolset = self._toolset.get(run.assistant_id) - if toolset: - tool_outputs = toolset.execute_tool_calls(tool_calls) - else: - logger.debug("Toolset is not available in the client.") - return - - logger.info("Tool outputs: %s", tool_outputs) - if tool_outputs: - self.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - :paramtype purpose: Union[str, _models.FilePurpose, None] - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. 
- """ - if body is not None: - return super().upload_file(body=body, **kwargs) - - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - if file is not None and purpose is not None: - return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # Determine filename and create correct FileType - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return super().upload_file(file=file_content, purpose=purpose, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}") from e - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". 
Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. 
Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: _models.OpenAIFile - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." - ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - time.sleep(sleep_interval) - uploaded_file = self.get_file(uploaded_file.id) - - return uploaded_file - - @overload - def create_vector_store_and_poll( - self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_and_poll( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store = super().create_vector_store( - body=body, content_type=content_type or "application/json", **kwargs - ) - elif isinstance(body, io.IOBase): - vector_store = super().create_vector_store(body=body, content_type=content_type, **kwargs) - else: - raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") - else: - store_configuration = None - if data_sources: - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) - - vector_store = super().create_vector_store( - file_ids=file_ids, - store_configuration=store_configuration, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - - while vector_store.status == "in_progress": - time.sleep(sleep_interval) - vector_store = super().get_vector_store(vector_store.id) - - return vector_store - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. 
- - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. 
- :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. 
- :paramtype file_ids: list[str] - :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword content_type: Body parameter content-type. Defaults to "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file_batch = super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file_batch = super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. 
Must be a dict (JSON) or file-like (IO[bytes]).") - else: - vector_store_file_batch = super().create_vector_store_file_batch( - vector_store_id=vector_store_id, - file_ids=file_ids, - data_sources=data_sources, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file_batch.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file_batch = super().get_vector_store_file_batch( - vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id - ) - - return vector_store_file_batch - - @distributed_trace - def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: - """ - Returns file content as byte stream for given file_id. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: An iterator that yields bytes from the file content. - :rtype: Iterator[bytes] - :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. - """ - kwargs["stream"] = True - response = super()._get_file_content(file_id, **kwargs) - return cast(Iterator[bytes], response) - - @distributed_trace - def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: - """ - Synchronously saves file content retrieved using a file identifier to the specified local directory. - - :param file_id: The unique identifier for the file to retrieve. - :type file_id: str - :param file_name: The name of the file to be saved. - :type file_name: str - :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: Optional[Union[str, Path]] - :raises ValueError: If the target path is not a directory or the file name is invalid. - :raises RuntimeError: If file content retrieval fails or no content is found. - :raises TypeError: If retrieved chunks are not bytes-like objects. - :raises IOError: If writing to the file fails. 
- """ - try: - # Determine and validate the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - if not path.is_dir(): - raise ValueError(f"The target path '{path}' is not a directory.") - - # Sanitize and validate the file name - sanitized_file_name = Path(file_name).name - if not sanitized_file_name: - raise ValueError("The provided file name is invalid.") - - # Retrieve the file content - file_content_stream = self.get_file_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - target_file_path = path / sanitized_file_name - - # Write the file content to disk - with target_file_path.open("wb") as file: - for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - file.write(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) - - except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error("An error occurred in save_file: %s", e) - raise - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. 
- - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - content_type: str = "application/json", - file_id: Optional[str] = None, - data_source: Optional[_models.VectorStoreDataSource] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. - :paramtype content_type: str - :keyword file_id: Identifier of the file. Default value is None. - :paramtype file_id: str - :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not _Unset: - if isinstance(body, dict): - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type or "application/json", - **kwargs, - ) - elif isinstance(body, io.IOBase): - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - body=body, - content_type=content_type, - **kwargs, - ) - else: - raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") - else: - vector_store_file = super().create_vector_store_file( - vector_store_id=vector_store_id, - file_id=file_id, - data_source=data_source, - chunking_strategy=chunking_strategy, - **kwargs, - ) - - while vector_store_file.status == "in_progress": - time.sleep(sleep_interval) - vector_store_file = super().get_vector_store_file( - vector_store_id=vector_store_id, file_id=vector_store_file.id - ) - - return vector_store_file - - @distributed_trace - def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - if assistant_id in self._toolset: - del self._toolset[assistant_id] - return super().delete_agent(assistant_id, **kwargs) - - -__all__: List[str] = [ - "AgentsOperations", - "ConnectionsOperations", - "TelemetryOperations", - "InferenceOperations", -] # Add all objects you want publicly available to users at this package level +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py deleted file mode 100644 index a5e9e67bf233..000000000000 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._trace_function import trace_function - -__all__ = [ - "trace_function", -] -__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py deleted file mode 100644 index 1890a6f1e88d..000000000000 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py +++ /dev/null @@ -1,204 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. 
-# Licensed under the MIT License. -# ------------------------------------ -import functools -import asyncio -from typing import Any, Callable, Optional, Dict - -try: - # pylint: disable = no-name-in-module - from opentelemetry import trace as opentelemetry_trace - - tracer = opentelemetry_trace.get_tracer(__name__) - _tracing_library_available = True -except ModuleNotFoundError: - _tracing_library_available = False - -if _tracing_library_available: - - def trace_function(span_name: Optional[str] = None): - """ - A decorator for tracing function calls using OpenTelemetry. - - This decorator handles various data types for function parameters and return values, - and records them as attributes in the trace span. The supported data types include: - - Basic data types: str, int, float, bool - - Collections: list, dict, tuple, set - - Special handling for collections: - - If a collection (list, dict, tuple, set) contains nested collections, the entire collection - is converted to a string before being recorded as an attribute. - - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. - - Object types are omitted, and the corresponding parameter is not traced. - - :param span_name: The name of the span. If not provided, the function name is used. - :type span_name: Optional[str] - :return: The decorated function with tracing enabled. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - @functools.wraps(func) - async def async_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for asynchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated asynchronous function. 
- :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = await func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - @functools.wraps(func) - def sync_wrapper(*args: Any, **kwargs: Any) -> Any: - """ - Wrapper function for synchronous functions. - - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: The result of the decorated synchronous function. 
- :rtype: Any - """ - name = span_name if span_name else func.__name__ - with tracer.start_as_current_span(name) as span: - try: - # Sanitize parameters and set them as attributes - sanitized_params = sanitize_parameters(func, *args, **kwargs) - span.set_attributes(sanitized_params) - result = func(*args, **kwargs) - sanitized_result = sanitize_for_attributes(result) - if sanitized_result is not None: - if isinstance(sanitized_result, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): - sanitized_result = str(sanitized_result) - span.set_attribute("code.function.return.value", sanitized_result) # type: ignore - return result - except Exception as e: - span.record_exception(e) - span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore - raise - - # Determine if the function is async - if asyncio.iscoroutinefunction(func): - return async_wrapper - return sync_wrapper - - return decorator - -else: - # Define a no-op decorator if OpenTelemetry is not available - def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument - """ - A no-op decorator for tracing function calls when OpenTelemetry is not available. - - :param span_name: Not used in this version. - :type span_name: Optional[str] - :return: The original function. - :rtype: Callable - """ - - def decorator(func: Callable) -> Callable: - return func - - return decorator - - -def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: - """ - Sanitize function parameters to include only built-in data types. - - :param func: The function being decorated. - :type func: Callable - :param args: Positional arguments passed to the function. - :type args: Tuple[Any] - :return: A dictionary of sanitized parameters. 
- :rtype: Dict[str, Any] - """ - import inspect - - params = inspect.signature(func).parameters - sanitized_params = {} - - for i, (name, param) in enumerate(params.items()): - if param.default == inspect.Parameter.empty and i < len(args): - value = args[i] - else: - value = kwargs.get(name, param.default) - - sanitized_value = sanitize_for_attributes(value) - # Check if the collection has nested collections - if isinstance(sanitized_value, (list, dict, tuple, set)): - if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): - sanitized_value = str(sanitized_value) - if sanitized_value is not None: - sanitized_params["code.function.parameter." + name] = sanitized_value - - return sanitized_params - - -# pylint: disable=R0911 -def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: - """ - Sanitize a value to be used as an attribute. - - :param value: The value to sanitize. - :type value: Any - :param is_recursive: Indicates if the function is being called recursively. Default is False. - :type is_recursive: bool - :return: The sanitized value or None if the value is not a supported type. 
- :rtype: Any - """ - if isinstance(value, (str, int, float, bool)): - return value - if isinstance(value, list): - return [ - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - ] - if isinstance(value, dict): - retval = { - k: sanitize_for_attributes(v, True) - for k, v in value.items() - if isinstance(v, (str, int, float, bool, list, dict, tuple, set)) - } - # dict to compatible with span attribute, so return it as a string - if is_recursive: - return retval - return str(retval) - if isinstance(value, tuple): - return tuple( - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - ) - if isinstance(value, set): - retval_set = { - sanitize_for_attributes(item, True) - for item in value - if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) - } - if is_recursive: - return retval_set - return str(retval_set) - return None diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py deleted file mode 100644 index 34fb7e5f7cd8..000000000000 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._ai_agents_instrumentor import AIAgentsInstrumentor - -__all__ = [ - "AIAgentsInstrumentor", -] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py deleted file mode 100644 index a0c7bbadd1b8..000000000000 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ /dev/null @@ -1,1793 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import copy -import functools -import importlib -import json -import logging -import os -from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast -from urllib.parse import urlparse - -from azure.ai.projects import _types -from azure.ai.projects.models import AgentRunStream, AsyncAgentRunStream, _models -from azure.ai.projects.models._enums import AgentsApiResponseFormatMode, MessageRole, RunStepStatus -from azure.ai.projects.models import ( - MessageAttachment, - MessageDeltaChunk, - MessageIncompleteDetails, - RunStep, - RunStepDeltaChunk, - RunStepFunctionToolCall, - RunStepToolCallDetails, - ThreadMessage, - ThreadRun, - ToolDefinition, - ToolOutput, - ToolResources, -) -from azure.ai.projects.models._patch import AgentEventHandler, AsyncAgentEventHandler, ToolSet -from azure.ai.projects.telemetry.agents._utils import ( - AZ_AI_AGENT_SYSTEM, - ERROR_TYPE, - GEN_AI_AGENT_DESCRIPTION, - GEN_AI_AGENT_ID, - GEN_AI_AGENT_NAME, - GEN_AI_EVENT_CONTENT, - GEN_AI_MESSAGE_ID, - GEN_AI_MESSAGE_STATUS, - GEN_AI_RESPONSE_MODEL, - GEN_AI_SYSTEM, - GEN_AI_SYSTEM_MESSAGE, - GEN_AI_THREAD_ID, - GEN_AI_THREAD_RUN_ID, - GEN_AI_THREAD_RUN_STATUS, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, - 
OperationName, - start_span, -) -from azure.core import CaseInsensitiveEnumMeta # type: ignore -from azure.core.settings import settings -from azure.core.tracing import AbstractSpan - -_Unset: Any = object() - -try: - # pylint: disable = no-name-in-module - from opentelemetry.trace import Span, StatusCode - - _tracing_library_available = True -except ModuleNotFoundError: - _tracing_library_available = False - - -__all__ = [ - "AIAgentsInstrumentor", -] - - -_agents_traces_enabled: bool = False -_trace_agents_content: bool = False - - -class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 - """An enumeration class to represent different types of traces.""" - - AGENTS = "Agents" - - -class AIAgentsInstrumentor: - """ - A class for managing the trace instrumentation of AI Agents. - - This class allows enabling or disabling tracing for AI Agents. - and provides functionality to check whether instrumentation is active. - - """ - - def __init__(self): - if not _tracing_library_available: - raise ModuleNotFoundError( - "Azure Core Tracing Opentelemetry is not installed. " - "Please install it using 'pip install azure-core-tracing-opentelemetry'" - ) - # In the future we could support different versions from the same library - # and have a parameter that specifies the version to use. - self._impl = _AIAgentsInstrumentorPreview() - - def instrument(self, enable_content_recording: Optional[bool] = None) -> None: - """ - Enable trace instrumentation for AI Agents. - - :param enable_content_recording: Whether content recording is enabled as part - of the traces or not. Content in this context refers to chat message content - and function call tool related function names, function parameter names and - values. True will enable content recording, False will disable it. If no value - is provided, then the value read from environment variable - AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. 
If the environment variable - is not found, then the value will default to False. Please note that successive calls - to instrument will always apply the content recording value provided with the most - recent call to instrument (including applying the environment variable if no value is - provided and defaulting to false if the environment variable is not found), even if - instrument was already previously called without uninstrument being called in between - the instrument calls. - :type enable_content_recording: bool, optional - - """ - self._impl.instrument(enable_content_recording) - - def uninstrument(self) -> None: - """ - Remove trace instrumentation for AI Agents. - - This method removes any active instrumentation, stopping the tracing - of AI Agents. - """ - self._impl.uninstrument() - - def is_instrumented(self) -> bool: - """ - Check if trace instrumentation for AI Agents is currently enabled. - - :return: True if instrumentation is active, False otherwise. - :rtype: bool - """ - return self._impl.is_instrumented() - - def is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content recording is enabled. - :rtype: bool - """ - return self._impl.is_content_recording_enabled() - - -class _AIAgentsInstrumentorPreview: - # pylint: disable=R0904 - """ - A class for managing the trace instrumentation of AI Agents. - - This class allows enabling or disabling tracing for AI Agents. - and provides functionality to check whether instrumentation is active. - """ - - def _str_to_bool(self, s): - if s is None: - return False - return str(s).lower() == "true" - - def instrument(self, enable_content_recording: Optional[bool] = None): - """ - Enable trace instrumentation for AI Agents. - - :param enable_content_recording: Whether content recording is enabled as part - of the traces or not. 
Content in this context refers to chat message content - and function call tool related function names, function parameter names and - values. True will enable content recording, False will disable it. If no value - is provided, then the value read from environment variable - AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable - is not found, then the value will default to False. - - :type enable_content_recording: bool, optional - """ - if enable_content_recording is None: - var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") - enable_content_recording = self._str_to_bool(var_value) - if not self.is_instrumented(): - self._instrument_agents(enable_content_recording) - else: - self._set_enable_content_recording(enable_content_recording=enable_content_recording) - - def uninstrument(self): - """ - Disable trace instrumentation for AI Agents. - - This method removes any active instrumentation, stopping the tracing - of AI Agents. - """ - if self.is_instrumented(): - self._uninstrument_agents() - - def is_instrumented(self): - """ - Check if trace instrumentation for AI Agents is currently enabled. - - :return: True if instrumentation is active, False otherwise. - :rtype: bool - """ - return self._is_instrumented() - - def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: - """This function sets the content recording value. - - :param enable_content_recording: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. - :type enable_content_recording: bool - """ - self._set_enable_content_recording(enable_content_recording=enable_content_recording) - - def is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. 
- :rtype bool - """ - return self._is_content_recording_enabled() - - def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: - for attr in attrs: - key, value = attr - if value is not None: - span.add_attribute(key, value) - - def _parse_url(self, url): - parsed = urlparse(url) - server_address = parsed.hostname - port = parsed.port - return server_address, port - - def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: - tool_calls_copy = copy.deepcopy(tool_calls) - for tool_call in tool_calls_copy: - if "function" in tool_call: - if "name" in tool_call["function"]: - del tool_call["function"]["name"] - if "arguments" in tool_call["function"]: - del tool_call["function"]["arguments"] - if not tool_call["function"]: - del tool_call["function"] - return tool_calls_copy - - def _create_event_attributes( - self, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_id: Optional[str] = None, - message_status: Optional[str] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> Dict[str, Any]: - attrs: Dict[str, Any] = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} - if thread_id: - attrs[GEN_AI_THREAD_ID] = thread_id - - if agent_id: - attrs[GEN_AI_AGENT_ID] = agent_id - - if thread_run_id: - attrs[GEN_AI_THREAD_RUN_ID] = thread_run_id - - if message_id: - attrs[GEN_AI_MESSAGE_ID] = message_id - - if message_status: - attrs[GEN_AI_MESSAGE_STATUS] = self._status_to_string(message_status) - - if usage: - attrs[GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens - attrs[GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens - - return attrs - - def add_thread_message_event( - self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None - ) -> None: - content_body = {} - if _trace_agents_content: - for content in message.content: - typed_content = content.get(content.type, None) - if typed_content: - content_details = {"value": 
self._get_field(typed_content, "value")} - annotations = self._get_field(typed_content, "annotations") - if annotations: - content_details["annotations"] = [a.as_dict() for a in annotations] - content_body[content.type] = content_details - - self._add_message_event( - span, - self._get_role(message.role), - content_body, - attachments=message.attachments, - thread_id=message.thread_id, - agent_id=message.assistant_id, - message_id=message.id, - thread_run_id=message.run_id, - message_status=message.status, - incomplete_details=message.incomplete_details, - usage=usage, - ) - - def _add_message_event( - self, - span, - role: str, - content: Any, - attachments: Any = None, # Optional[List[MessageAttachment]] or dict - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - message_id: Optional[str] = None, - thread_run_id: Optional[str] = None, - message_status: Optional[str] = None, - incomplete_details: Optional[MessageIncompleteDetails] = None, - usage: Optional[_models.RunStepCompletionUsage] = None, - ) -> None: - # TODO document new fields - - event_body = {} - if _trace_agents_content: - event_body["content"] = content - if attachments: - event_body["attachments"] = [] - for attachment in attachments: - attachment_body = {"id": attachment.file_id} - if attachment.tools: - attachment_body["tools"] = [self._get_field(tool, "type") for tool in attachment.tools] - event_body["attachments"].append(attachment_body) - - if incomplete_details: - event_body["incomplete_details"] = incomplete_details - event_body["role"] = role - - attributes = self._create_event_attributes( - thread_id=thread_id, - agent_id=agent_id, - thread_run_id=thread_run_id, - message_id=message_id, - message_status=message_status, - usage=usage, - ) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) - span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) - - def _get_field(self, obj: Any, field: str) -> Any: - if not obj: - return None - - if 
isinstance(obj, dict): - return obj.get(field, None) - - return getattr(obj, field, None) - - def _add_instructions_event( - self, - span: "AbstractSpan", - instructions: Optional[str], - additional_instructions: Optional[str], - agent_id: Optional[str] = None, - thread_id: Optional[str] = None, - ) -> None: - if not instructions: - return - - event_body: Dict[str, Any] = {} - if _trace_agents_content and (instructions or additional_instructions): - if instructions and additional_instructions: - event_body["content"] = f"{instructions} {additional_instructions}" - else: - event_body["content"] = instructions or additional_instructions - - attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) - span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) - - def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: - if role is None or role is _Unset: - return "user" - - if isinstance(role, MessageRole): - return role.value - - return role - - def _status_to_string(self, status: Any) -> str: - return status.value if hasattr(status, "value") else status - - def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: - # do we want a new event for it ? 
- tool_calls = [ - { - "id": t.id, - "type": t.type, - "function": ( - {"name": t.function.name, "arguments": json.loads(t.function.arguments)} - if isinstance(t, RunStepFunctionToolCall) - else None - ), - } - for t in cast(RunStepToolCallDetails, step.step_details).tool_calls - ] - - attributes = self._create_event_attributes( - thread_id=step.thread_id, - agent_id=step.assistant_id, - thread_run_id=step.run_id, - message_status=step.status, - usage=step.usage, - ) - - if _trace_agents_content: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) - else: - tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) - attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}) - span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) - - def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: - if run and span and span.span_instance.is_recording: - span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(run.status)) - span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) - if run and run.usage: - span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) - span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) - - @staticmethod - def agent_api_response_to_str(response_format: Any) -> Optional[str]: - """ - Convert response_format to string. - - :param response_format: The response format. - :type response_format: ~azure.ai.projects._types.AgentsApiResponseFormatOption - :returns: string for the response_format. - :rtype: Optional[str] - :raises: Value error if response_format is not of type AgentsApiResponseFormatOption. 
- """ - if isinstance(response_format, str) or response_format is None: - return response_format - if isinstance(response_format, AgentsApiResponseFormatMode): - return response_format.value - if isinstance(response_format, _models.AgentsApiResponseFormat): - return response_format.type - if isinstance(response_format, _models.ResponseFormatJsonSchemaType): - return response_format.type - raise ValueError(f"Unknown response format {type(response_format)}") - - def start_thread_run_span( - self, - operation_name: OperationName, - project_name: str, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[ThreadMessage]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - _tools: Optional[List[ToolDefinition]] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - operation_name, - project_name, - thread_id=thread_id, - agent_id=agent_id, - model=model, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and span.span_instance.is_recording and instructions and additional_instructions: - self._add_instructions_event( - span, instructions, additional_instructions, thread_id=thread_id, agent_id=agent_id - ) - - if additional_messages: - for message in additional_messages: - self.add_thread_message_event(span, message) - return span - - def start_submit_tool_outputs_span( - self, - project_name: str, - thread_id: Optional[str] = None, - run_id: Optional[str] = None, - tool_outputs: Optional[List[ToolOutput]] = None, - 
event_handler: Optional[Union[AgentEventHandler, AsyncAgentEventHandler]] = None, - ) -> "Optional[AbstractSpan]": - run_span = event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None - if run_span is None: - run_span = event_handler.span if isinstance(event_handler, _AsyncAgentEventHandlerTraceWrapper) else None - - if run_span: - recorded = self._add_tool_message_events(run_span, tool_outputs) - else: - recorded = False - - span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, project_name, thread_id=thread_id, run_id=run_id) - if not recorded: - self._add_tool_message_events(span, tool_outputs) - return span - - def _add_tool_message_events( - self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] - ) -> bool: - if span and span.span_instance.is_recording and tool_outputs: - for tool_output in tool_outputs: - if _trace_agents_content: - body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} - else: - body = {"content": "", "id": tool_output["tool_call_id"]} - span.span_instance.add_event("gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body)}) - return True - - return False - - def start_create_agent_span( - self, - project_name: str, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - _tools: Optional[List[ToolDefinition]] = None, - _tool_resources: Optional[ToolResources] = None, - _toolset: Optional[ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "Optional[AbstractSpan]": - span = start_span( - OperationName.CREATE_AGENT, - project_name, - span_name=f"{OperationName.CREATE_AGENT.value} {name}", - model=model, - temperature=temperature, - top_p=top_p, - response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), - ) - if span and 
span.span_instance.is_recording: - if name: - span.add_attribute(GEN_AI_AGENT_NAME, name) - if description: - span.add_attribute(GEN_AI_AGENT_DESCRIPTION, description) - self._add_instructions_event(span, instructions, None) - - return span - - def start_create_thread_span( - self, - project_name: str, - messages: Optional[List[ThreadMessage]] = None, - _tool_resources: Optional[ToolResources] = None, - ) -> "Optional[AbstractSpan]": - span = start_span(OperationName.CREATE_THREAD, project_name) - if span and span.span_instance.is_recording: - for message in messages or []: - self.add_thread_message_event(span, message) - - return span - - def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = None) -> "Optional[AbstractSpan]": - return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) - - def trace_create_agent(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = self.start_create_agent_span( - project_name=project_name, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # 
pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_agent_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - name = kwargs.get("name") - model = kwargs.get("model") - description = kwargs.get("description") - instructions = kwargs.get("instructions") - tools = kwargs.get("tools") - tool_resources = kwargs.get("tool_resources") - toolset = kwargs.get("toolset") - temperature = kwargs.get("temperature") - top_p = kwargs.get("top_p") - response_format = kwargs.get("response_format") - - span = self.start_create_agent_span( - project_name=project_name, - name=name, - model=model, - description=description, - instructions=instructions, - _tools=tools, - _tool_resources=tool_resources, - _toolset=toolset, - temperature=temperature, - top_p=top_p, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_AGENT_ID, result.id) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, 
("error.type", error_type)) - raise - - return result - - def trace_create_thread(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - messages = kwargs.get("messages") - - span = self.start_create_thread_span(project_name=project_name, messages=messages) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_thread_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - messages = kwargs.get("messages") - - span = self.start_create_thread_span(project_name=project_name, messages=messages) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = 
f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_message(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_message_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - role = kwargs.get("role") - content = kwargs.get("content") - attachments = kwargs.get("attachments") - - span = self.start_create_message_span( - project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - 
span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_run(self, operation_name, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - assistant_id = kwargs.get("assistant_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if 
isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_run_async(self, operation_name, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - assistant_id = kwargs.get("assistant_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - if span.span_instance.is_recording: - span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(result.status)) - span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model) - if result.usage: - 
span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens) - span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) - span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - project_name=project_name, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - if stream and event_handler: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - - result = function(*args, **kwargs) - if not isinstance(result, AgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = 
f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - run_id = kwargs.get("run_id") - tool_outputs = kwargs.get("tool_outputs") - event_handler = kwargs.get("event_handler") - - span = self.start_submit_tool_outputs_span( - project_name=project_name, - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - event_handler=event_handler, - ) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - if stream: - kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) - - result = await function(*args, **kwargs) - if not isinstance(result, AsyncAgentRunStream): - self.set_end_run(span, result) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if 
isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs): - event_handler = kwargs.get("event_handler") - if event_handler is None: - event_handler = args[2] - span = getattr(event_handler, "span", None) - - if span is None: - return await function(*args, **kwargs) - - with span.change_context(span.span_instance): - try: - result = await function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_create_stream(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - assistant_id = kwargs.get("assistant_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = 
kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - event_handler = kwargs.get("event_handler") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return function(*args, **kwargs) - - # TODO: how to keep span active in the current context without existing? - # TODO: dummy span for none - with span.change_context(span.span_instance): - try: - kwargs["event_handler"] = self.wrap_handler(event_handler, span) - result = function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_create_stream_async(self, function, *args, **kwargs): - operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - assistant_id = kwargs.get("assistant_id") - model = kwargs.get("model") - instructions = kwargs.get("instructions") - additional_instructions = 
kwargs.get("additional_instructions") - additional_messages = kwargs.get("additional_messages") - temperature = kwargs.get("temperature") - tools = kwargs.get("tools") - top_p = kwargs.get("top_p") - max_prompt_tokens = kwargs.get("max_prompt_tokens") - max_completion_tokens = kwargs.get("max_completion_tokens") - response_format = kwargs.get("response_format") - event_handler = kwargs.get("event_handler") - - span = self.start_thread_run_span( - operation_name, - project_name, - thread_id, - assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - temperature=temperature, - _tools=tools, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - response_format=response_format, - ) - - if span is None: - return await function(*args, **kwargs) - - # TODO: how to keep span active in the current context without existing? - # TODO: dummy span for none - with span.change_context(span.span_instance): - try: - kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) - result = await function(*args, **kwargs) - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def trace_list_messages(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - - span = 
self.start_list_messages_span(project_name=project_name, thread_id=thread_id) - - if span is None: - return function(*args, **kwargs) - - with span: - try: - result = function(*args, **kwargs) - for message in result.data: - self.add_thread_message_event(span, message) - - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - async def trace_list_messages_async(self, function, *args, **kwargs): - project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - 0 - ]._config.project_name - thread_id = kwargs.get("thread_id") - - span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) - - if span is None: - return await function(*args, **kwargs) - - with span: - try: - result = await function(*args, **kwargs) - for message in result.data: - self.add_thread_message_event(span, message) - - except Exception as exc: - # Set the span status to error - if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] - span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - description=str(exc), - ) - module = getattr(exc, "__module__", "") - module = module if module != "builtins" else "" - error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ - self._set_attributes(span, ("error.type", error_type)) - raise - - return result - - def handle_run_stream_exit(self, _function, *args, **kwargs): - agent_run_stream = args[0] - 
exc_type = kwargs.get("exc_type") - exc_val = kwargs.get("exc_val") - exc_tb = kwargs.get("exc_tb") - # TODO: is it a good idea? - # if not, we'll need to wrap stream and call exit - if ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) - elif ( - agent_run_stream.event_handler - and agent_run_stream.event_handler.__class__.__name__ == "_AsyncAgentEventHandlerTraceWrapper" - ): - agent_run_stream.event_handler.__aexit__(exc_type, exc_val, exc_tb) - - def wrap_handler( - self, handler: "Optional[AgentEventHandler]" = None, span: "Optional[AbstractSpan]" = None - ) -> "Optional[AgentEventHandler]": - if isinstance(handler, _AgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def wrap_async_handler( - self, handler: "Optional[AsyncAgentEventHandler]" = None, span: "Optional[AbstractSpan]" = None - ) -> "Optional[AsyncAgentEventHandler]": - if isinstance(handler, _AsyncAgentEventHandlerTraceWrapper): - return handler - - if span and span.span_instance.is_recording: - return _AsyncAgentEventHandlerTraceWrapper(self, span, handler) - - return handler - - def start_create_message_span( - self, - project_name: str, - thread_id: Optional[str] = None, - content: Optional[str] = None, - role: Optional[Union[str, MessageRole]] = None, - attachments: Optional[List[MessageAttachment]] = None, - ) -> "Optional[AbstractSpan]": - role_str = self._get_role(role) - span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id) - if span and span.span_instance.is_recording: - self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id) - return span - - def _trace_sync_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = 
None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to a synchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. - :rtype: Callable - """ - - @functools.wraps(function) - def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsOperations.create_agent"): - return self.trace_create_agent(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_thread"): - return self.trace_create_thread(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_message"): - return self.trace_create_message(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_run"): - return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_and_process_run"): - return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): - return self.trace_submit_tool_outputs(False, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): - return self.trace_submit_tool_outputs(True, function, *args, **kwargs) - if 
class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): - return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_stream"): - return self.trace_create_stream(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.list_messages"): - return self.trace_list_messages(function, *args, **kwargs) - if class_function_name.startswith("AgentRunStream.__exit__"): - return self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _trace_async_function( - self, - function: Callable, - *, - _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AGENTS, - _name: Optional[str] = None, - ) -> Callable: - """ - Decorator that adds tracing to an asynchronous function. - - :param function: The function to be traced. - :type function: Callable - :param args_to_ignore: A list of argument names to be ignored in the trace. - Defaults to None. - :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AGENTS. - :type trace_type: TraceType, optional - :param name: The name of the trace, will set to func name if not provided. - :type name: str, optional - :return: The traced function. 
- :rtype: Callable - """ - - @functools.wraps(function) - async def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 - if span_impl_type is None: - return function(*args, **kwargs) - - class_function_name = function.__qualname__ - - if class_function_name.startswith("AgentsOperations.create_agent"): - return await self.trace_create_agent_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_thread"): - return await self.trace_create_thread_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_message"): - return await self.trace_create_message_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_run"): - return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_and_process_run"): - return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): - return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): - return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): - return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.create_stream"): - return await self.trace_create_stream_async(function, *args, **kwargs) - if class_function_name.startswith("AgentsOperations.list_messages"): - return await self.trace_list_messages_async(function, *args, **kwargs) - if class_function_name.startswith("AsyncAgentRunStream.__aexit__"): - return 
self.handle_run_stream_exit(function, *args, **kwargs) - # Handle the default case (if the function name does not match) - return None # Ensure all paths return - - return inner - - def _inject_async(self, f, _trace_type, _name): - wrapper_fun = self._trace_async_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _inject_sync(self, f, _trace_type, _name): - wrapper_fun = self._trace_sync_function(f) - wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - return wrapper_fun - - def _agents_apis(self): - sync_apis = ( - ("azure.ai.projects.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "thread_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "message_create"), - ("azure.ai.projects.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "create_and_process_run", - TraceType.AGENTS, - "create_and_process_run", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "submit_tool_outputs_to_run", - TraceType.AGENTS, - "submit_tool_outputs_to_run", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "submit_tool_outputs_to_stream", - TraceType.AGENTS, - "submit_tool_outputs_to_stream", - ), - ( - "azure.ai.projects.operations", - "AgentsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ("azure.ai.projects.operations", "AgentsOperations", "create_stream", TraceType.AGENTS, "create_stream"), - ("azure.ai.projects.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), - ("azure.ai.projects.models", "AgentRunStream", "__exit__", TraceType.AGENTS, 
"__exit__"), - ) - async_apis = ( - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_thread", - TraceType.AGENTS, - "agents_thread_create", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_message", - TraceType.AGENTS, - "agents_thread_message", - ), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_and_process_run", - TraceType.AGENTS, - "create_and_process_run", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "submit_tool_outputs_to_run", - TraceType.AGENTS, - "submit_tool_outputs_to_run", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "submit_tool_outputs_to_stream", - TraceType.AGENTS, - "submit_tool_outputs_to_stream", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "_handle_submit_tool_outputs", - TraceType.AGENTS, - "_handle_submit_tool_outputs", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "create_stream", - TraceType.AGENTS, - "create_stream", - ), - ( - "azure.ai.projects.aio.operations", - "AgentsOperations", - "list_messages", - TraceType.AGENTS, - "list_messages", - ), - ("azure.ai.projects.models", "AsyncAgentRunStream", "__aexit__", TraceType.AGENTS, "__aexit__"), - ) - return sync_apis, async_apis - - def _agents_api_list(self): - sync_apis, async_apis = self._agents_apis() - yield sync_apis, self._inject_sync - yield async_apis, self._inject_async - - def _generate_api_and_injector(self, apis): - for api, injector in apis: - for module_name, class_name, method_name, trace_type, name in api: - try: - module = importlib.import_module(module_name) - api = getattr(module, class_name) - if hasattr(api, method_name): - yield api, method_name, trace_type, injector, name - 
except AttributeError as e: - # Log the attribute exception with the missing class information - logging.warning( - "AttributeError: The module '%s' does not have the class '%s'. %s", - module_name, - class_name, - str(e), - ) - except Exception as e: # pylint: disable=broad-except - # Log other exceptions as a warning, as we're not sure what they might be - logging.warning("An unexpected error occurred: '%s'", str(e)) - - def _available_agents_apis_and_injectors(self): - """ - Generates a sequence of tuples containing Agents API classes, method names, and - corresponding injector functions. - - :return: A generator yielding tuples. - :rtype: tuple - """ - yield from self._generate_api_and_injector(self._agents_api_list()) - - def _instrument_agents(self, enable_content_tracing: bool = False): - """This function modifies the methods of the Agents API classes to - inject logic before calling the original methods. - The original methods are stored as _original attributes of the methods. - - :param enable_content_tracing: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. 
- :type enable_content_tracing: bool - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - if _agents_traces_enabled: - raise RuntimeError("Traces already started for AI Agents") - _agents_traces_enabled = True - _trace_agents_content = enable_content_tracing - for ( - api, - method, - trace_type, - injector, - name, - ) in self._available_agents_apis_and_injectors(): - # Check if the method of the api class has already been modified - if not hasattr(getattr(api, method), "_original"): - setattr(api, method, injector(getattr(api, method), trace_type, name)) - - def _uninstrument_agents(self): - """This function restores the original methods of the Agents API classes - by assigning them back from the _original attributes of the modified methods. - """ - # pylint: disable=W0603 - global _agents_traces_enabled - global _trace_agents_content - _trace_agents_content = False - for api, method, _, _, _ in self._available_agents_apis_and_injectors(): - if hasattr(getattr(api, method), "_original"): - setattr(api, method, getattr(getattr(api, method), "_original")) - _agents_traces_enabled = False - - def _is_instrumented(self): - """This function returns True if Agents API has already been instrumented - for tracing and False if it has not been instrumented. - - :return: A value indicating whether the Agents API is currently instrumented or not. - :rtype: bool - """ - return _agents_traces_enabled - - def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: - """This function sets the content recording value. - - :param enable_content_recording: Indicates whether tracing of message content should be enabled. - This also controls whether function call tool function names, - parameter names and parameter values are traced. 
- :type enable_content_recording: bool - """ - global _trace_agents_content # pylint: disable=W0603 - _trace_agents_content = enable_content_recording - - def _is_content_recording_enabled(self) -> bool: - """This function gets the content recording value. - - :return: A bool value indicating whether content tracing is enabled. - :rtype bool - """ - return _trace_agents_content - - -class _AgentEventHandlerTraceWrapper(AgentEventHandler): - def __init__( - self, - instrumentor: _AIAgentsInstrumentorPreview, - span: "AbstractSpan", - inner_handler: Optional[AgentEventHandler] = None, - ): - super().__init__() - self.span = span - self.inner_handler = inner_handler - self.ended = False - self.last_run: Optional[ThreadRun] = None - self.last_message: Optional[ThreadMessage] = None - self.instrumentor = instrumentor - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - if self.inner_handler: - self.inner_handler.on_message_delta(delta) - - def on_thread_message(self, message: "ThreadMessage") -> None: - if self.inner_handler: - self.inner_handler.on_thread_message(message) - - if message.status in {"completed", "incomplete"}: - self.last_message = message - - def on_thread_run(self, run: "ThreadRun") -> None: - if self.inner_handler: - self.inner_handler.on_thread_run(run) - self.last_run = run - - def on_run_step(self, step: "RunStep") -> None: - if self.inner_handler: - self.inner_handler.on_run_step(step) - - if step.status == RunStepStatus.IN_PROGRESS: - return - - # todo - report errors for failure statuses here and in run ? 
- if step.type == "tool_calls" and isinstance(step.step_details, RunStepToolCallDetails): - self.instrumentor._add_tool_assistant_message_event( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - self.span, step - ) - elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: - self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) - self.last_message = None - - def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - if self.inner_handler: - self.inner_handler.on_run_step_delta(delta) - - def on_error(self, data: str) -> None: - if self.inner_handler: - self.inner_handler.on_error(data) - - def on_done(self) -> None: - if self.inner_handler: - self.inner_handler.on_done() - # it could be called multiple tines (for each step) __exit__ - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - if self.inner_handler: - self.inner_handler.on_unhandled_event(event_type, event_data) - - def __exit__(self, exc_type, exc_val, exc_tb): - if not self.ended: - self.ended = True - self.instrumentor.set_end_run(self.span, self.last_run) - - if self.last_run and self.last_run.last_error: - self.span.span_instance.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - self.last_run.last_error.message, - ) - self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) - - self.span.__exit__(exc_type, exc_val, exc_tb) - self.span.finish() - - -class _AsyncAgentEventHandlerTraceWrapper(AsyncAgentEventHandler): - def __init__( - self, - instrumentor: _AIAgentsInstrumentorPreview, - span: "AbstractSpan", - inner_handler: Optional[AsyncAgentEventHandler] = None, - ): - super().__init__() - self.span = span - self.inner_handler = inner_handler - self.ended = False - self.last_run: Optional[ThreadRun] = None - self.last_message: Optional[ThreadMessage] = None - self.instrumentor = instrumentor - - async def 
on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_message_delta(delta) - - async def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_thread_message(message) - - if message.status in {"completed", "incomplete"}: - self.last_message = message - - async def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_thread_run(run) - self.last_run = run - - async def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_run_step(step) - - if step.status == RunStepStatus.IN_PROGRESS: - return - - # todo - report errors for failure statuses here and in run ? - if step.type == "tool_calls" and isinstance(step.step_details, RunStepToolCallDetails): - self.instrumentor._add_tool_assistant_message_event( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] - self.span, step - ) - elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: - self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) - self.last_message = None - - async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_run_step_delta(delta) - - async def on_error(self, data: str) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_error(data) - - async def on_done(self) -> None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_done() - # it could be called multiple tines (for each step) __exit__ - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> 
None: # type: ignore[func-returns-value] - if self.inner_handler: - await self.inner_handler.on_unhandled_event(event_type, event_data) - - def __aexit__(self, exc_type, exc_val, exc_tb): - if not self.ended: - self.ended = True - self.instrumentor.set_end_run(self.span, self.last_run) - - if self.last_run and self.last_run.last_error: - self.span.set_status( - StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - self.last_run.last_error.message, - ) - self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) - - self.span.__exit__(exc_type, exc_val, exc_tb) - self.span.finish() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py deleted file mode 100644 index bdc18e1381e8..000000000000 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py +++ /dev/null @@ -1,139 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -from enum import Enum -from typing import Optional - -from azure.core.tracing import AbstractSpan, SpanKind # type: ignore -from azure.core.settings import settings # type: ignore - -try: - from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import - - _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable -except ModuleNotFoundError: - _span_impl_type = None - - -GEN_AI_MESSAGE_ID = "gen_ai.message.id" -GEN_AI_MESSAGE_STATUS = "gen_ai.message.status" -GEN_AI_THREAD_ID = "gen_ai.thread.id" -GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id" -GEN_AI_AGENT_ID = "gen_ai.agent.id" -GEN_AI_AGENT_NAME = "gen_ai.agent.name" -GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description" -GEN_AI_OPERATION_NAME = "gen_ai.operation.name" -GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status" -GEN_AI_REQUEST_MODEL = "gen_ai.request.model" -GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" -GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" -GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens" -GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens" -GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" -GEN_AI_SYSTEM = "gen_ai.system" -SERVER_ADDRESS = "server.address" -AZ_AI_AGENT_SYSTEM = "az.ai.agents" -GEN_AI_TOOL_NAME = "gen_ai.tool.name" -GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id" -GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format" -GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" -GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" -GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message" -GEN_AI_EVENT_CONTENT = "gen_ai.event.content" -ERROR_TYPE = "error.type" - - -class OperationName(Enum): - CREATE_AGENT = "create_agent" - CREATE_THREAD = "create_thread" - CREATE_MESSAGE = "create_message" - START_THREAD_RUN = "start_thread_run" - EXECUTE_TOOL = "execute_tool" - LIST_MESSAGES = "list_messages" - SUBMIT_TOOL_OUTPUTS = 
"submit_tool_outputs" - PROCESS_THREAD_RUN = "process_thread_run" - - -def trace_tool_execution( - tool_call_id: str, - tool_name: str, - thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - agent_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - run_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow -) -> "Optional[AbstractSpan]": - span = start_span( - OperationName.EXECUTE_TOOL, - server_address=None, - span_name=f"execute_tool {tool_name}", - thread_id=thread_id, - agent_id=agent_id, - run_id=run_id, - gen_ai_system=None, - ) # it's a client code execution, not GenAI span - if span is not None and span.span_instance.is_recording: - span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) - span.add_attribute(GEN_AI_TOOL_NAME, tool_name) - - return span - - -def start_span( - operation_name: OperationName, - server_address: Optional[str], - span_name: Optional[str] = None, - thread_id: Optional[str] = None, - agent_id: Optional[str] = None, - run_id: Optional[str] = None, - model: Optional[str] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional[str] = None, - gen_ai_system: Optional[str] = AZ_AI_AGENT_SYSTEM, - kind: SpanKind = SpanKind.CLIENT, -) -> "Optional[AbstractSpan]": - if _span_impl_type is None: - return None - - span = _span_impl_type(name=span_name or operation_name.value, kind=kind) - - if span and span.span_instance.is_recording: - if gen_ai_system: - span.add_attribute(GEN_AI_SYSTEM, AZ_AI_AGENT_SYSTEM) - - span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value) - - if server_address: - span.add_attribute(SERVER_ADDRESS, server_address) - - if thread_id: - span.add_attribute(GEN_AI_THREAD_ID, thread_id) - - if agent_id: - span.add_attribute(GEN_AI_AGENT_ID, agent_id) 
- - if run_id: - span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id) - - if model: - span.add_attribute(GEN_AI_REQUEST_MODEL, model) - - if temperature: - span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature)) - - if top_p: - span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p)) - - if max_prompt_tokens: - span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) - - if max_completion_tokens: - span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens) - - if response_format: - span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format) - - return span diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_azure_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_azure_functions_async.py index 3b4c6bd54a30..ba59380b4ff7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_azure_functions_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_azure_functions_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py index 11e75c3a4849..480f13bb8092 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py index ad93d01bc0b5..b3abdb389978 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py index d0bba841aca6..0c0452971a4b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_functions_async.py index ae314c9c459e..a354d71e8785 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_functions_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_functions_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 868f384faa97..2f96630c3948 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index 21ef6f2d8b6c..8a13c4b4bdbc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 5b846c10ef93..a43b7d98629f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py index e63ae6629e4d..aeddb4e6a45c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py index bb39337f5896..91856614d85e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 320f73417c19..773f0ee7e4d7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. 
# Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py index 057d6a07fd4b..2033bcc6d368 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py index d2cbca871ae2..62a270799eee 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py index 1a4910b19d83..2c4f2377ddaf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py index 6783244c5d75..dbca4b5c2375 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_functions.py index ec3e5115450a..4f25baa96c20 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_functions.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index 0c0e64566991..c3c2a455832e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py index 51af96b959c5..ae45350938aa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py index f2f3d32ec35f..89d56da186ca 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py index a8ba498ad38b..01755554367b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index a00c37187cdf..bcc9662eaa05 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index e9edbb9c812e..342a0f031ec1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_file_search.py index 9e39b0b0534b..5958341abcb1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_file_search.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py index f723f38e5d6c..e6d1262bd996 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index ed49e3c6a0b7..c44cd09f1a9f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py index 8bdac9039a5c..a5311724ffae 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # ------------------------------------ # Copyright (c) Microsoft Corporation. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index 2a59a3e79307..3112a8be0ee8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py index e0df6f3515cf..cb1e3d9cf43d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py index 20c9d5edb311..1c0a85c59d0f 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py b/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py index 3b8dafd613b4..3196eaf90d1d 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_image_embeddings_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_image_embeddings_with_azure_ai_inference_client_async.py index 6d6d4179cd60..b9d7036ad44d 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_image_embeddings_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_image_embeddings_with_azure_ai_inference_client_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/sdk_packaging.toml b/sdk/ai/azure-ai-projects/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/ai/azure-ai-projects/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index 3c806ef0e972..37a6290f3338 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -13,17 +13,7 @@ PACKAGE_NAME = "azure-ai-projects" -PACKAGE_PPRINT_NAME = "Azure AI Projects" - -PIPY_LONG_DESCRIPTION_BEGIN = "" -PIPY_LONG_DESCRIPTION_END = "" -LINKS_DIVIDER = "" - -GITHUB_URL = f"https://aka.ms/azsdk/azure-ai-projects/python/code" - -# Define the regular expression pattern to match links in the format [section name](#section_header) -pattern = re.compile(r"\[([^\]]+)\]\(#([^\)]+)\)") - +PACKAGE_PPRINT_NAME = "Azure Ai Projects" # a-b-c => a/b/c package_folder_path = PACKAGE_NAME.replace("-", "/") @@ -36,36 +26,17 @@ raise RuntimeError("Cannot find version information") -long_description = "" - -# When you click the links in the Table of Content which has the format of {URL/#section_header}, you are supposed to be redirected to the section header. -# However, this is not supported when the README is rendered in pypi.org. The README doesn't render with id={section_header} in HTML. -# To resolve this broken link, we make the long description to have top of the README content, the Table of Content, and the links at the bottom of the README -# And replace the links in Table of Content to redirect to github.com. 
-with open("README.md", "r") as f: - readme_content = f.read() - start_index = readme_content.find(PIPY_LONG_DESCRIPTION_BEGIN) + len(PIPY_LONG_DESCRIPTION_BEGIN) - end_index = readme_content.find(PIPY_LONG_DESCRIPTION_END) - long_description = readme_content[start_index:end_index].strip() - long_description = long_description.replace("{{package_name}}", PACKAGE_PPRINT_NAME) - long_description = re.sub(pattern, rf"[\1]({GITHUB_URL})", long_description) - links_index = readme_content.find(LINKS_DIVIDER) - long_description += "\n\n" + readme_content[links_index:].strip() - -with open("CHANGELOG.md", "r") as f: - long_description += "\n\n" + f.read() - setup( name=PACKAGE_NAME, version=version, description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), - long_description=long_description, + long_description=open("README.md", "r").read(), long_description_content_type="text/markdown", license="MIT License", author="Microsoft Corporation", author_email="azpysdkhelp@microsoft.com", - url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects", - keywords="azure sdk, azure, ai, agents, foundry, inference, chat completion, project, evaluation", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python", @@ -76,9 +47,7 @@ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", "License :: OSI Approved :: MIT License", - "Topic :: Scientific/Engineering :: Artificial Intelligence", ], zip_safe=False, packages=find_packages( @@ -96,7 +65,7 @@ install_requires=[ "isodate>=0.6.1", "azure-core>=1.30.0", - "typing-extensions>=4.12.2", + "typing-extensions>=4.6.0", ], python_requires=">=3.8", ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py 
b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 6862bad9744b..361cde03a1d4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=too-many-lines,line-too-long,useless-suppression # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -3132,4 +3132,4 @@ def test_client_with_thread_messages(self, **kwargs): assert client.agents.delete_agent(agent.id).deleted, "The agent was not deleted" messages = client.agents.list_messages(thread_id=thread.id) - assert len(messages.data), "The data from the agent was not received." + assert len(messages.data), "The data from the agent was not received." \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index a1dcb52ce881..394bdd909c70 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=too-many-lines,line-too-long,useless-suppression # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -3037,4 +3037,4 @@ async def test_negative_create_delete_agent(self, **kwargs): # close client and confirm an exception was caught await client.close() assert exception_caught - """ + """ \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py index 5796e9a7fa3f..51998f9a7a68 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/agents/user_functions.py b/sdk/ai/azure-ai-projects/tests/agents/user_functions.py index 0dfada80689b..883fd2fa8e32 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/user_functions.py +++ b/sdk/ai/azure-ai-projects/tests/agents/user_functions.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index 0c9c0567346c..dfa929b049af 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index 7096f87493be..a25e9b98ea41 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index dc28671bb667..d190a090c3d6 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py b/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py index 8010ef7effcc..e3addb83f57f 100644 --- a/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index f654702c6634..cafed9389223 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 3860c7161917..b6567ae3574c 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 11dec67b0896..d3c8b1d7ac65 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py index 732baa2acc16..c755ec1dd6ba 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py index cb73287e472f..b97fbd741d61 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=too-many-lines,line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -1048,4 +1048,4 @@ def on_done(self) -> None: print("Stream completed.") def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor_async.py index 81fd11a2615c..d82fcd32c502 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 2e8d2adc8a0a..751bde4ec0dc 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 6e507701253408679175e95176995c437f8e00d4 +commit: 799efd99712203ab387f69ec3d55378d36bc62ea repo: Azure/azure-rest-api-specs additionalDirectories: