@@ -72,7 +72,7 @@ from azure.ai.evaluation import RelevanceEvaluator
72
72
query = "What is the capital of France?"
73
73
response = "Paris."
74
74
75
- # Initializing an evaluator
75
+ # Initialize an evaluator:
76
76
relevance_eval = RelevanceEvaluator(model_config)
77
77
relevance_eval(query = query, response = response)
78
78
```
@@ -162,7 +162,7 @@ Our evaluators understand that the first turn of the conversation provides valid
162
162
For conversation mode, here's an example for `GroundednessEvaluator`:
163
163
164
164
``` python
165
- # Conversation mode
165
+ # Conversation mode:
166
166
import json
167
167
import os
168
168
from azure.ai.evaluation import GroundednessEvaluator, AzureOpenAIModelConfiguration
@@ -174,7 +174,7 @@ model_config = AzureOpenAIModelConfiguration(
174
174
api_version = os.environ.get("AZURE_API_VERSION"),
175
175
)
176
176
177
- # Initializing the Groundedness and Groundedness Pro evaluators:
177
+ # Initialize the Groundedness and Groundedness Pro evaluators:
178
178
groundedness_eval = GroundednessEvaluator(model_config)
179
179
180
180
conversation = {
@@ -350,12 +350,12 @@ To ensure the `evaluate()` API can correctly parse the data, you must specify co
350
350
from azure.ai.evaluation import evaluate
351
351
352
352
result = evaluate(
353
- data = " data.jsonl" , # Provide your data here
353
+ data = " data.jsonl" , # Provide your data here:
354
354
evaluators = {
355
355
" groundedness" : groundedness_eval,
356
356
" answer_length" : answer_length
357
357
},
358
- # Column mapping
358
+ # Column mapping:
359
359
evaluator_config = {
360
360
" groundedness" : {
361
361
" column_mapping" : {
@@ -367,7 +367,7 @@ result = evaluate(
367
367
},
368
368
# Optionally, provide your Azure AI Foundry project information to track your evaluation results in your project portal.
369
369
azure_ai_project = azure_ai_project,
370
- # Optionally, provide an output path to dump a JSON file of metric summary, row level data, and metric and Azure AI project URL.
370
+ # Optionally, provide an output path to dump a JSON file of metric summary, row-level data, and the metric and Azure AI project URL.
371
371
output_path = " ./myevalresults.json"
372
372
)
373
373
```
0 commit comments