
Commit c032898

Fix-Sample-Code-Agents - GitHub Issue 44431 (#44441)
* sample-fix
* revert
* model fix
* Fix
* fix
1 parent: 3a03f60

File tree: 6 files changed (+50 -13 lines)


sdk/ai/azure-ai-projects/.env.template

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@
 # `https://<your-ai-services-account-name>.services.ai.azure.com/api/projects/<your-project-name>`
 AZURE_AI_PROJECT_ENDPOINT=
 AZURE_AI_MODEL_DEPLOYMENT_NAME=
-AGENT_NAME=
+AZURE_AI_AGENT_NAME=
 CONVERSATION_ID=
 CONNECTION_NAME=
 AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID=
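
For context, the samples load this file with python-dotenv; a minimal sketch of reading the renamed variable (illustrative, not part of the commit):

    import os

    from dotenv import load_dotenv

    # Reads variables from a .env file in the current directory, if present.
    load_dotenv()

    # The variable renamed by this commit; os.environ raises KeyError if it is unset.
    agent_name = os.environ["AZURE_AI_AGENT_NAME"]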

sdk/ai/azure-ai-projects/README.md

Lines changed: 20 additions & 3 deletions
@@ -632,7 +632,7 @@ with (
     agent = project_client.agents.create_version(
         agent_name=os.environ["AZURE_AI_AGENT_NAME"],
         definition=PromptAgentDefinition(
-            model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
+            model=model_deployment_name,
             instructions="You are a helpful assistant that answers general questions",
         ),
     )
@@ -643,13 +643,30 @@ with (
         item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
         include_sample_schema=True,
     )
+    # Notes on data_mapping:
+    # sample.output_text is the string output of the agent
+    # sample.output_items is the structured JSON output of the agent, including tool call information
     testing_criteria = [
         {
             "type": "azure_ai_evaluator",
             "name": "violence_detection",
             "evaluator_name": "builtin.violence",
-            "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
-        }
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
+        },
+        {
+            "type": "azure_ai_evaluator",
+            "name": "fluency",
+            "evaluator_name": "builtin.fluency",
+            "initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
+        },
+        {
+            "type": "azure_ai_evaluator",
+            "name": "task_adherence",
+            "evaluator_name": "builtin.task_adherence",
+            "initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"},
+        },
     ]
     eval_object = openai_client.evals.create(
         name="Agent Evaluation",
sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 Set these environment variables with your own values:
 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
    page of your Microsoft Foundry portal.
-2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
+2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
 3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent
 """

@@ -34,7 +34,7 @@

 load_dotenv()

-agent_name = os.environ["AGENT_NAME"]
+agent_name = os.environ["AZURE_AI_AGENT_NAME"]
 conversation_id = os.environ["CONVERSATION_ID"]

 endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
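
After the rename, a script still reading the old variable fails with a bare KeyError. A minimal sketch of reading the new name and constructing the project client (assuming azure-ai-projects and azure-identity are installed; the actual retrieval calls live in the sample and are not shown in this diff):

    import os

    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    # Fail fast with a readable message if the renamed variable is missing.
    agent_name = os.environ.get("AZURE_AI_AGENT_NAME")
    if not agent_name:
        raise RuntimeError("Set AZURE_AI_AGENT_NAME (formerly AGENT_NAME) in your .env file")

    with AIProjectClient(
        endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
        credential=DefaultAzureCredential(),
    ) as project_client:
        ...  # retrieve the agent and conversation as the sample does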

sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py

Lines changed: 2 additions & 2 deletions
@@ -23,7 +23,7 @@
 Set these environment variables with your own values:
 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
    page of your Microsoft Foundry portal.
-2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
+2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
 3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent
 """

@@ -36,7 +36,7 @@
 load_dotenv()

 endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
-agent_name = os.environ["AGENT_NAME"]
+agent_name = os.environ["AZURE_AI_AGENT_NAME"]
 conversation_id = os.environ["CONVERSATION_ID"]

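
The async sample uses the same renamed variable. A sketch of the equivalent async setup, assuming the aio variants of azure-ai-projects and azure-identity (standard Azure SDK pattern) and Python 3.10+ for the parenthesized async with; retrieval calls are again left to the sample:

    import asyncio
    import os

    from azure.ai.projects.aio import AIProjectClient
    from azure.identity.aio import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    async def main() -> None:
        # Async credential and client both need closing; async with handles both.
        async with (
            DefaultAzureCredential() as credential,
            AIProjectClient(
                endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
                credential=credential,
            ) as project_client,
        ):
            agent_name = os.environ["AZURE_AI_AGENT_NAME"]
            ...  # retrieve the agent and conversation as the async sample does

    asyncio.run(main())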

sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py

Lines changed: 21 additions & 3 deletions
@@ -40,6 +40,7 @@

 load_dotenv()
 endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
+model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "")  # Sample: gpt-4o-mini

 # [START agent_evaluation_basic]
 with (
@@ -50,7 +51,7 @@
     agent = project_client.agents.create_version(
         agent_name=os.environ["AZURE_AI_AGENT_NAME"],
         definition=PromptAgentDefinition(
-            model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
+            model=model_deployment_name,
             instructions="You are a helpful assistant that answers general questions",
         ),
     )
@@ -61,13 +62,30 @@
         item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
         include_sample_schema=True,
     )
+    # Notes on data_mapping:
+    # sample.output_text is the string output of the agent
+    # sample.output_items is the structured JSON output of the agent, including tool call information
     testing_criteria = [
         {
             "type": "azure_ai_evaluator",
             "name": "violence_detection",
             "evaluator_name": "builtin.violence",
-            "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
-        }
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
+        },
+        {
+            "type": "azure_ai_evaluator",
+            "name": "fluency",
+            "evaluator_name": "builtin.fluency",
+            "initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
+        },
+        {
+            "type": "azure_ai_evaluator",
+            "name": "task_adherence",
+            "evaluator_name": "builtin.task_adherence",
+            "initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
+            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"},
+        },
     ]
     eval_object = openai_client.evals.create(
         name="Agent Evaluation",

sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py

Lines changed: 4 additions & 2 deletions
@@ -51,16 +51,18 @@
     item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
     include_sample_schema=True,
 )
+# Notes on data_mapping:
+# {{sample.output_text}} is the string output of the provided model target for the given input in {{item.query}}
 testing_criteria = [
     {
         "type": "azure_ai_evaluator",
         "name": "violence_detection",
         "evaluator_name": "builtin.violence",
-        "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
+        "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
     }
 ]
 eval_object = openai_client.evals.create(
-    name="Agent Evaluation",
+    name="Model Evaluation",
     data_source_config=data_source_config,
     testing_criteria=testing_criteria,  # type: ignore
 )
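
For reference, a condensed sketch of the corrected flow, writing data_source_config in the plain dict form the OpenAI evals API accepts (an assumption here; the sample's own construction is outside this diff, as is the openai_client setup):

    # Assumes `openai_client` is an OpenAI-compatible client already configured
    # against the project, as in the surrounding sample code (not shown in this diff).
    data_source_config = {
        "type": "custom",
        "item_schema": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
        "include_sample_schema": True,  # lets data_mapping reference {{sample.*}} fields
    }
    testing_criteria = [
        {
            "type": "azure_ai_evaluator",
            "name": "violence_detection",
            "evaluator_name": "builtin.violence",
            # {{sample.output_text}} is the model target's string output for {{item.query}}
            "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
        }
    ]
    eval_object = openai_client.evals.create(
        name="Model Evaluation",  # previously mislabeled "Agent Evaluation"
        data_source_config=data_source_config,
        testing_criteria=testing_criteria,  # type: ignore
    )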
