
Commit b24fae7

Make evaluators and data required in evaluate (#37471)
1 parent a62df9c commit b24fae7

3 files changed: +9 -14 lines

sdk/evaluation/azure-ai-evaluation/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
 - The parameter name `project_scope` in content safety evaluators have been renamed to `azure_ai_project` for consistency with evaluate API and simulators.
 - Model configurations classes are now of type `TypedDict` and are exposed in the `azure.ai.evaluation` module instead of coming from `promptflow.core`.
 - Updated the parameter names for `question` and `answer` in built-in evaluators to more generic terms: `query` and `response`.
+- `data` and `evaluators` are now required keywords in `evaluate`.

 ### Features Added
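
For context, a minimal sketch of what a call to `evaluate` looks like after this change. Only `evaluate`, `GroundednessEvaluator`, and the `data`/`evaluators` keywords come from this commit; the `AzureOpenAIModelConfiguration` name, its fields, and the endpoint values are assumptions about the surrounding package, shown here only as placeholders.

```python
# Minimal sketch, assuming azure-ai-evaluation is installed and an Azure OpenAI
# deployment is available. AzureOpenAIModelConfiguration and its fields are
# assumptions; they are not part of this diff.
from azure.ai.evaluation import AzureOpenAIModelConfiguration, GroundednessEvaluator, evaluate

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<resource>.openai.azure.com",
    api_key="<api-key>",
    azure_deployment="<deployment>",
)

# After this commit, `data` and `evaluators` are required keyword arguments.
result = evaluate(
    data="data.jsonl",  # only .jsonl files are supported
    evaluators={"groundedness": GroundednessEvaluator(model_config=model_config)},
)
```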

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py

Lines changed: 8 additions & 8 deletions
@@ -436,10 +436,10 @@ def _rename_columns_conditionally(df: pd.DataFrame) -> pd.DataFrame:
 # @log_evaluate_activity
 def evaluate(
     *,
+    data: str,
+    evaluators: Dict[str, Callable],
     evaluation_name: Optional[str] = None,
     target: Optional[Callable] = None,
-    data: Optional[str] = None,
-    evaluators: Optional[Dict[str, Callable]] = None,
     evaluator_config: Optional[Dict[str, Dict[str, str]]] = None,
     azure_ai_project: Optional[AzureAIProject] = None,
     output_path: Optional[str] = None,
@@ -448,16 +448,16 @@ def evaluate(
     """Evaluates target or data with built-in or custom evaluators. If both target and data are provided,
     data will be run through target function and then results will be evaluated.

+    :keyword data: Path to the data to be evaluated or passed to target if target is set.
+        Only .jsonl format files are supported. `target` and `data` both cannot be None. Required.
+    :paramtype data: str
+    :keyword evaluators: Evaluators to be used for evaluation. It should be a dictionary with key as alias for evaluator
+        and value as the evaluator function. Required.
+    :paramtype evaluators: Dict[str, Callable]
     :keyword evaluation_name: Display name of the evaluation.
     :paramtype evaluation_name: Optional[str]
     :keyword target: Target to be evaluated. `target` and `data` both cannot be None
     :paramtype target: Optional[Callable]
-    :keyword data: Path to the data to be evaluated or passed to target if target is set.
-        Only .jsonl format files are supported. `target` and `data` both cannot be None
-    :paramtype data: Optional[str]
-    :keyword evaluators: Evaluators to be used for evaluation. It should be a dictionary with key as alias for evaluator
-        and value as the evaluator function.
-    :paramtype evaluators: Optional[Dict[str, Callable]
     :keyword evaluator_config: Configuration for evaluators. The configuration should be a dictionary with evaluator
         names as keys and a dictionary of column mappings as values. The column mappings should be a dictionary with
         keys as the column names in the evaluator input and values as the column names in the input data or data
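
Because the signature begins with `*`, every parameter is keyword-only, so the two newly required parameters must be spelled out by name, and their absence is caught by Python's argument binding rather than by a runtime check. A small sketch of the calling behaviour this implies (`my_evaluator` is a stand-in for any evaluator callable and is assumed to be defined elsewhere):

```python
# Sketch of the keyword-only, required semantics introduced by this commit.
evaluate(data="data.jsonl", evaluators={"g": my_evaluator})  # OK

# evaluate("data.jsonl", {"g": my_evaluator})  # TypeError: evaluate() takes 0 positional arguments
# evaluate(data="data.jsonl")                  # TypeError: missing required keyword-only argument: 'evaluators'
```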

sdk/evaluation/azure-ai-evaluation/tests/unittests/test_evaluate.py

Lines changed: 0 additions & 6 deletions
@@ -107,12 +107,6 @@ def _question_answer_override_target(query, response):
 @pytest.mark.usefixtures("mock_model_config")
 @pytest.mark.unittest
 class TestEvaluate:
-    def test_evaluate_missing_data(self, mock_model_config):
-        with pytest.raises(EvaluationException) as exc_info:
-            evaluate(evaluators={"g": GroundednessEvaluator(model_config=mock_model_config)})
-
-        assert "data parameter must be provided for evaluation." in exc_info.value.args[0]
-
     def test_evaluate_evaluators_not_a_dict(self, mock_model_config):
         with pytest.raises(EvaluationException) as exc_info:
             evaluate(
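
The deleted test exercised the old runtime check that raised `EvaluationException` when `data` was omitted; with `data` now required in the signature, that code path is no longer reachable, so the test goes away. A hypothetical equivalent under the new signature (not part of this commit) would expect Python's `TypeError` instead:

```python
import pytest

# Hypothetical sketch only; the commit deletes the test outright rather than
# replacing it. Omitting `data` now fails during argument binding, so the
# expected exception is TypeError, not EvaluationException.
class TestEvaluate:
    def test_evaluate_missing_data(self, mock_model_config):
        with pytest.raises(TypeError):
            evaluate(evaluators={"g": GroundednessEvaluator(model_config=mock_model_config)})
```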
