Commit 385095f

Add-New-Prop-Eval-Run (Azure#38802)
1 parent c4b1d7c

This commit adds a new evaluation-run property, EvaluationRunProperties.EVALUATION_SDK ("_azureml.evaluation_sdk_name"), and writes it to run history as "azure-ai-evaluation:{VERSION}" on both the promptflow batch-run path and the fallback path.
File tree: 2 files changed, 9 additions (+), 0 deletions (-)

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_constants.py

Lines changed: 1 addition & 0 deletions
@@ -54,6 +54,7 @@ class EvaluationRunProperties:
 
     RUN_TYPE = "runType"
     EVALUATION_RUN = "_azureml.evaluation_run"
+    EVALUATION_SDK = "_azureml.evaluation_sdk_name"
 
 
 DEFAULT_EVALUATION_RESULTS_FILE_NAME = "evaluation_results.json"
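For context, the new constant is the run-history property key; combined with the value format added in _utils.py below, the property written to an evaluation run would look like the following (the version string here is illustrative, not taken from the diff):

# Illustrative property entry; "1.x.y" stands in for the SDK's VERSION.
{"_azureml.evaluation_sdk_name": "azure-ai-evaluation:1.x.y"}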

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_utils.py

Lines changed: 8 additions & 0 deletions
@@ -22,6 +22,7 @@
 )
 from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._version import VERSION
 from azure.ai.evaluation._azure._clients import LiteMLClient
 
 LOGGER = logging.getLogger(__name__)
@@ -190,9 +191,16 @@ def _log_metrics_and_instance_results(
             properties={
                 EvaluationRunProperties.RUN_TYPE: "eval_run",
                 EvaluationRunProperties.EVALUATION_RUN: "promptflow.BatchRun",
+                EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
                 "_azureml.evaluate_artifacts": json.dumps([{"path": artifact_name, "type": "table"}]),
             }
         )
+    else:
+        ev_run.write_properties_to_run_history(
+            properties={
+                EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
+            }
+        )
 
     for metric_name, metric_value in metrics.items():
         ev_run.log_metric(metric_name, metric_value)
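To make the new branching concrete, here is a minimal, self-contained sketch of the behavior this hunk introduces. FakeEvalRun and log_run_properties are stand-ins invented for illustration, not the SDK's real classes; only the property keys, the value format, and the if/else structure are taken from the diff above.

# Minimal sketch of the behavior added by this commit (stand-ins, not SDK code).

VERSION = "1.0.0"  # stand-in for azure.ai.evaluation._version.VERSION


class EvaluationRunProperties:
    RUN_TYPE = "runType"
    EVALUATION_RUN = "_azureml.evaluation_run"
    EVALUATION_SDK = "_azureml.evaluation_sdk_name"  # added in this commit


class FakeEvalRun:
    """Records properties in memory instead of calling the run-history service."""

    def __init__(self):
        self.properties = {}

    def write_properties_to_run_history(self, properties):
        self.properties.update(properties)


def log_run_properties(ev_run, run=None):
    # Mirrors the branching in _log_metrics_and_instance_results: the SDK
    # name/version property is now written on both paths, not only the
    # promptflow batch-run path.
    if run is None:
        ev_run.write_properties_to_run_history(
            properties={
                EvaluationRunProperties.RUN_TYPE: "eval_run",
                EvaluationRunProperties.EVALUATION_RUN: "promptflow.BatchRun",
                EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
            }
        )
    else:
        ev_run.write_properties_to_run_history(
            properties={
                EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
            }
        )


ev_run = FakeEvalRun()
log_run_properties(ev_run, run=object())  # non-batch path: only the SDK property
assert ev_run.properties == {"_azureml.evaluation_sdk_name": "azure-ai-evaluation:1.0.0"}

The net effect is that every evaluation run now records which SDK and version produced it, whether or not it went through the promptflow batch-run path.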
