
Commit 5cc642a

Copilot and singankit committed
Implement custom user agent support in evaluate function
- Added user_agent parameter to evaluate() and _evaluate() functions
- Updated get_common_headers() to append custom user agent
- Created context variable system to pass user_agent to evaluators
- Updated all RAI service functions to support user_agent parameter
- Added docstring updates and usage examples
- Created sample demonstrating custom user agent usage
- Added unit tests for new functionality

Co-authored-by: singankit <[email protected]>
1 parent 2308b75 commit 5cc642a

File tree: 7 files changed (+311, -40 lines)

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_common/rai_service.py

Lines changed: 65 additions & 38 deletions
Large diffs are not rendered by default.
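Because the rai_service.py change itself is collapsed in this view, the snippet below is only a rough sketch of how get_common_headers() might fold in the new optional user_agent argument. The behavior it illustrates (a final header of the form 'azure-ai-evaluation/<version> (type=evaluator; subtype=<name>) <custom user agent>') is taken from the new header unit tests at the bottom of this commit; the USER_AGENT constant here is a stand-in for the real value in azure.ai.evaluation._user_agent, and the actual implementation may differ.

# Sketch only: mirrors what the new header tests assert, not the unrendered rai_service.py diff.
from typing import Dict, Optional

USER_AGENT = "azure-ai-evaluation/1.0.0"  # stand-in for azure.ai.evaluation._user_agent.USER_AGENT

def get_common_headers(token: str, evaluator_name: Optional[str] = None,
                       user_agent: Optional[str] = None) -> Dict[str, str]:
    """Build the headers shared by RAI service requests (illustrative)."""
    ua = USER_AGENT
    if evaluator_name:
        ua += f" (type=evaluator; subtype={evaluator_name})"
    if user_agent:
        ua += f" {user_agent}"  # custom suffix is appended last
    return {
        "Authorization": f"Bearer {token}",
        "User-Agent": ua,
        # the real helper may add further headers (e.g. Content-Type) not shown here
    }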
sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_context.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import contextvars
from typing import Optional

# Context variable to pass user_agent to evaluators
_current_user_agent: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar('current_user_agent', default=None)

def get_current_user_agent() -> Optional[str]:
    """Get the current user agent from context.

    :return: The current user agent if set, None otherwise.
    :rtype: Optional[str]
    """
    return _current_user_agent.get()

def set_current_user_agent(user_agent: Optional[str]) -> contextvars.Token:
    """Set the current user agent in context.

    :param user_agent: The user agent to set.
    :type user_agent: Optional[str]
    :return: A token that can be used to reset the context.
    :rtype: contextvars.Token
    """
    return _current_user_agent.set(user_agent)
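As a small usage sketch (not part of the commit), the token returned by set_current_user_agent can restore the previous value through the underlying ContextVar, which matters if evaluate() is invoked from code that has already set its own value:

# Illustrative only; reaches into the module-private ContextVar to show how the
# token returned by set_current_user_agent restores the previous value.
from azure.ai.evaluation._context import (
    _current_user_agent,
    get_current_user_agent,
    set_current_user_agent,
)

token = set_current_user_agent("MyApp/1.0.0")
try:
    assert get_current_user_agent() == "MyApp/1.0.0"
    # ... evaluators read the value here via get_current_user_agent() ...
finally:
    _current_user_agent.reset(token)  # back to whatever was set before (None by default)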

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py

Lines changed: 27 additions & 1 deletion
@@ -31,6 +31,7 @@
 )
 from .._model_configurations import AzureAIProject, EvaluationResult, EvaluatorConfig
 from .._user_agent import USER_AGENT
+from .._context import set_current_user_agent
 from ._batch_run import (
     EvalRunContext,
     CodeClient,
@@ -54,7 +55,6 @@
     OAIEvalRunCreationInfo
 )
 LOGGER = logging.getLogger(__name__)
-
 # For metrics (aggregates) whose metric names intentionally differ from their
 # originating column name, usually because the aggregation of the original value
 # means something sufficiently different.
@@ -697,6 +697,7 @@ def evaluate(
     azure_ai_project: Optional[Union[str, AzureAIProject]] = None,
     output_path: Optional[Union[str, os.PathLike]] = None,
     fail_on_evaluator_errors: bool = False,
+    user_agent: Optional[str] = None,
     **kwargs,
 ) -> EvaluationResult:
     """Evaluates target or data with built-in or custom evaluators. If both target and data are provided,
@@ -728,6 +729,9 @@
         Defaults to false, which means that evaluations will continue regardless of failures.
         If such failures occur, metrics may be missing, and evidence of failures can be found in the evaluation's logs.
     :paramtype fail_on_evaluator_errors: bool
+    :keyword user_agent: Custom user agent string to append to the default user agent for HTTP requests.
+        If provided, the final user agent will be: 'azure-ai-evaluation/<version> <user_agent>'.
+    :paramtype user_agent: Optional[str]
     :return: Evaluation results.
     :rtype: ~azure.ai.evaluation.EvaluationResult
 
@@ -749,6 +753,23 @@
         :dedent: 8
         :caption: Run an evaluation on local data with one or more evaluators using Azure AI Project URL in following format
             https://{resource_name}.services.ai.azure.com/api/projects/{project_name}
+
+    .. admonition:: Example with custom user agent:
+
+        .. code-block:: python
+
+            from azure.ai.evaluation import evaluate
+            from azure.identity import DefaultAzureCredential
+
+            # Run evaluation with custom user agent
+            result = evaluate(
+                data="path/to/data.jsonl",
+                evaluators={
+                    "groundedness": GroundednessEvaluator(azure_ai_project=azure_ai_project, credential=credential)
+                },
+                azure_ai_project=azure_ai_project,
+                user_agent="MyApp/1.0.0"  # Custom user agent to append to default
+            )
     """
     try:
         return _evaluate(
@@ -760,6 +781,7 @@
             azure_ai_project=azure_ai_project,
             output_path=output_path,
             fail_on_evaluator_errors=fail_on_evaluator_errors,
+            user_agent=user_agent,
             **kwargs,
         )
     except Exception as e:
@@ -828,11 +850,15 @@ def _evaluate( # pylint: disable=too-many-locals,too-many-statements
     azure_ai_project: Optional[Union[str, AzureAIProject]] = None,
     output_path: Optional[Union[str, os.PathLike]] = None,
     fail_on_evaluator_errors: bool = False,
+    user_agent: Optional[str] = None,
     **kwargs,
 ) -> EvaluationResult:
     if fail_on_evaluator_errors:
         _print_fail_flag_warning()
 
+    # Set the user_agent in context for evaluators to access
+    set_current_user_agent(user_agent)
+
     # Turn inputted mess of data into a dataframe, apply targets if needed
     # split graders and evaluators, and verify that column mappings are sensible.
     validated_data = _preprocess_data(

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py

Lines changed: 3 additions & 0 deletions
@@ -17,6 +17,7 @@
 from azure.ai.evaluation._common.utils import validate_conversation
 from azure.ai.evaluation._constants import _AggregationType
 from azure.core.credentials import TokenCredential
+from azure.ai.evaluation._context import get_current_user_agent
 
 from . import EvaluatorBase
 
@@ -122,6 +123,7 @@ async def _evaluate_conversation(self, conversation: Dict) -> Dict[str, T]:
             metric_name=self._eval_metric,
             project_scope=self._azure_ai_project,
             credential=self._credential,
+            user_agent=get_current_user_agent(),
         )
         return result
 
@@ -157,6 +159,7 @@ async def _evaluate_query_response(self, eval_input: Dict) -> Dict[str, T]:
             credential=self._credential,
             annotation_task=self._get_task(),
             evaluator_name=self.__class__.__name__,
+            user_agent=get_current_user_agent(),
         )
 
     def _get_task(self):
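The RAI service functions that receive this user_agent live in rai_service.py, whose diff is collapsed above. Purely as an assumed illustration of the plumbing, a helper there would forward the value into get_common_headers() when building its request; _query_rai_service_sketch below is a hypothetical name, not a function that exists in the SDK.

# Hypothetical plumbing sketch; the real service helpers and their signatures are in
# azure/ai/evaluation/_common/rai_service.py, which is not rendered in this commit view.
from typing import Any, Dict, Optional

from azure.ai.evaluation._common.rai_service import get_common_headers

def _query_rai_service_sketch(token: str, payload: Dict[str, Any],
                              evaluator_name: Optional[str] = None,
                              user_agent: Optional[str] = None) -> Dict[str, str]:
    # Forward the custom suffix so it ends up in the User-Agent header.
    headers = get_common_headers(token, evaluator_name, user_agent=user_agent)
    # ... POST `payload` to the RAI service with these headers and parse the annotation ...
    return headers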
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import json
import os
from azure.ai.evaluation import evaluate, GroundednessEvaluator
from azure.identity import DefaultAzureCredential


def main():
    """Sample demonstrating how to use custom user agent with evaluate function."""

    # Setup Azure AI Project (replace with your actual project details)
    azure_ai_project = {
        "subscription_id": "your-subscription-id",
        "resource_group_name": "your-resource-group",
        "project_name": "your-project-name",
    }
    credential = DefaultAzureCredential()

    # Create sample data
    sample_data = [
        {
            "query": "What is the capital of France?",
            "response": "The capital of France is Paris.",
            "context": "France is a country in Western Europe. Its capital city is Paris, which is also the largest city in France."
        },
        {
            "query": "What is machine learning?",
            "response": "Machine learning is a subset of artificial intelligence that uses algorithms to analyze data and make predictions.",
            "context": "Artificial intelligence is a broad field that includes machine learning, deep learning, and other techniques for creating intelligent systems."
        }
    ]

    # Save sample data to a file
    data_file = "sample_evaluation_data.jsonl"
    with open(data_file, "w") as f:
        for item in sample_data:
            f.write(json.dumps(item) + "\n")

    try:
        # [START evaluate_with_custom_user_agent]
        # Run evaluation with custom user agent
        result = evaluate(
            data=data_file,
            evaluators={
                "groundedness": GroundednessEvaluator(
                    azure_ai_project=azure_ai_project,
                    credential=credential
                )
            },
            azure_ai_project=azure_ai_project,
            user_agent="MyApp/1.0.0 CustomEvaluation"  # Custom user agent to append to default
        )

        print("Evaluation completed successfully!")
        print(f"Results: {result}")
        # [END evaluate_with_custom_user_agent]

    finally:
        # Clean up sample data file
        if os.path.exists(data_file):
            os.remove(data_file)


if __name__ == "__main__":
    main()

sdk/evaluation/azure-ai-evaluation/tests/unittests/test_evaluate.py

Lines changed: 64 additions & 1 deletion
@@ -963,4 +963,67 @@ def test_name_map_conversion(self):
         # Test failure case
         result = _convert_name_map_into_property_entries(test_map, segment_length=10, max_segments = 1)
         assert result[EvaluationRunProperties.NAME_MAP_LENGTH] == -1
-        assert len(result) == 1
+        assert len(result) == 1
+
+
+def test_evaluate_user_agent_parameter():
+    """Test that the user_agent parameter is accepted by the evaluate function."""
+    import tempfile
+    import json
+    from unittest.mock import patch, MagicMock
+    from azure.ai.evaluation._context import get_current_user_agent
+
+    # Create test data
+    test_data = [
+        {"query": "What is the capital of France?", "response": "Paris"}
+    ]
+
+    with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
+        for item in test_data:
+            f.write(json.dumps(item) + '\n')
+        temp_file = f.name
+
+    try:
+        # Mock evaluator
+        def mock_evaluator(query, response):
+            # Check that user agent is available in context
+            user_agent = get_current_user_agent()
+            return {"score": 1.0, "user_agent_in_context": user_agent}
+
+        # Test with custom user agent
+        with patch('azure.ai.evaluation._evaluate._evaluate._preprocess_data') as mock_preprocess, \
+                patch('azure.ai.evaluation._evaluate._evaluate._run_callable_evaluators') as mock_run_evaluators:
+
+            mock_preprocess.return_value = {
+                "evaluators": {"test_evaluator": mock_evaluator},
+                "graders": {},
+                "input_data_df": pd.DataFrame(test_data),
+                "column_mapping": {},
+                "target_run": None,
+                "batch_run_client": MagicMock(),
+                "batch_run_data": temp_file
+            }
+
+            mock_run_evaluators.return_value = (
+                pd.DataFrame([{"test_evaluator.score": 1.0}]),
+                {"test_evaluator.score": 1.0},
+                {}
+            )
+
+            # Test that user_agent parameter is accepted and doesn't cause errors
+            result = evaluate(
+                data=temp_file,
+                evaluators={"test_evaluator": mock_evaluator},
+                user_agent="TestApp/1.0.0"
+            )
+
+            # Verify the function was called
+            assert mock_preprocess.called
+            assert mock_run_evaluators.called
+            assert result is not None
+
+    finally:
+        # Clean up
+        import os
+        if os.path.exists(temp_file):
+            os.unlink(temp_file)
Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
import pytest
from azure.ai.evaluation._common.rai_service import get_common_headers


def test_get_common_headers_default():
    """Test get_common_headers with default parameters."""
    token = "test_token"
    headers = get_common_headers(token)

    assert "Authorization" in headers
    assert headers["Authorization"] == "Bearer test_token"
    assert "User-Agent" in headers
    assert "azure-ai-evaluation" in headers["User-Agent"]


def test_get_common_headers_with_evaluator_name():
    """Test get_common_headers with evaluator name."""
    token = "test_token"
    evaluator_name = "TestEvaluator"
    headers = get_common_headers(token, evaluator_name)

    assert "Authorization" in headers
    assert headers["Authorization"] == "Bearer test_token"
    assert "User-Agent" in headers
    assert "azure-ai-evaluation" in headers["User-Agent"]
    assert f"(type=evaluator; subtype={evaluator_name})" in headers["User-Agent"]


def test_get_common_headers_with_custom_user_agent():
    """Test get_common_headers with custom user agent."""
    token = "test_token"
    custom_user_agent = "MyApp/1.0.0"
    headers = get_common_headers(token, user_agent=custom_user_agent)

    assert "Authorization" in headers
    assert headers["Authorization"] == "Bearer test_token"
    assert "User-Agent" in headers
    assert "azure-ai-evaluation" in headers["User-Agent"]
    assert custom_user_agent in headers["User-Agent"]
    # Verify format: "azure-ai-evaluation/<version> MyApp/1.0.0"
    assert headers["User-Agent"].endswith(f" {custom_user_agent}")


def test_get_common_headers_with_evaluator_name_and_custom_user_agent():
    """Test get_common_headers with both evaluator name and custom user agent."""
    token = "test_token"
    evaluator_name = "TestEvaluator"
    custom_user_agent = "MyApp/1.0.0"
    headers = get_common_headers(token, evaluator_name, custom_user_agent)

    assert "Authorization" in headers
    assert headers["Authorization"] == "Bearer test_token"
    assert "User-Agent" in headers
    assert "azure-ai-evaluation" in headers["User-Agent"]
    assert f"(type=evaluator; subtype={evaluator_name})" in headers["User-Agent"]
    assert custom_user_agent in headers["User-Agent"]
    # Verify format: "azure-ai-evaluation/<version> (type=evaluator; subtype=TestEvaluator) MyApp/1.0.0"
    assert headers["User-Agent"].endswith(f" {custom_user_agent}")
