@@ -174,7 +174,7 @@ model_config = AzureOpenAIModelConfiguration(
     api_version=os.environ.get("AZURE_API_VERSION"),
 )
 
-# Initializing Groundedness and Groundedness Pro evaluators
+# Initializing the Groundedness and Groundedness Pro evaluators:
 groundedness_eval = GroundednessEvaluator(model_config)
 
 conversation = {
@@ -186,7 +186,7 @@ conversation = {
     ]
 }
 
-# Alternatively, you can load the same content from a JSONL file
+# Alternatively, you can load the same content from a JSONL file.
 groundedness_conv_score = groundedness_eval(conversation=conversation)
 print(json.dumps(groundedness_conv_score, indent=4))
 ```
@@ -241,10 +241,10 @@ from pathlib import Path
 from azure.ai.evaluation import ContentSafetyEvaluator
 import base64
 
-# create an instance of an evaluator with image and multi-modal support
+# Create an instance of an evaluator with image and multi-modal support.
 safety_evaluator = ContentSafetyEvaluator(credential=azure_cred, azure_ai_project=project_scope)
 
-# example of a conversation with an image URL
+# Example of a conversation with an image URL:
 conversation_image_url = {
     "messages": [
         {
@@ -277,7 +277,7 @@ conversation_image_url = {
     ]
 }
 
-# example of a conversation with base64 encoded images
+# Example of a conversation with base64 encoded images:
 base64_image = ""
 
 with Path.open("Image1.jpg", "rb") as image_file:
@@ -293,7 +293,7 @@ conversation_base64 = {
     ]
 }
 
-# run the evaluation on the conversation to output the result
+# Run the evaluation on the conversation to output the result.
 safety_score = safety_evaluator(conversation=conversation_image_url)
 ```
 
@@ -350,12 +350,12 @@ To ensure the `evaluate()` API can correctly parse the data, you must specify co
 from azure.ai.evaluation import evaluate
 
 result = evaluate(
-    data="data.jsonl", # provide your data here
+    data="data.jsonl", # Provide your data here
     evaluators={
         "groundedness": groundedness_eval,
         "answer_length": answer_length
     },
-    # column mapping
+    # Column mapping
     evaluator_config={
         "groundedness": {
             "column_mapping": {
@@ -365,9 +365,9 @@ result = evaluate(
             }
         }
     },
-    # Optionally provide your Azure AI Foundry project information to track your evaluation results in your project portal
+    # Optionally, provide your Azure AI Foundry project information to track your evaluation results in your project portal.
     azure_ai_project=azure_ai_project,
-    # Optionally provide an output path to dump a json of metric summary, row level data and metric and Azure AI project URL
+    # Optionally, provide an output path to dump a JSON file of metric summary, row level data, and metric and Azure AI project URL.
     output_path="./myevalresults.json"
 )
 ```
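The last two hunks pass a custom `answer_length` evaluator to `evaluate()` alongside the built-in groundedness evaluator, but its definition sits outside the changed lines. A minimal sketch of such a code-based evaluator, assuming the plain-callable convention (keyword arguments matched from the dataset or column mapping, a dict of results returned), might look like this; the `response` parameter name is an illustrative assumption:

```python
# Illustrative sketch only: "answer_length" is referenced but not defined in this diff.
# evaluate() accepts plain callables that take column-mapped keyword arguments and
# return a dict of results; "response" is assumed to match a dataset column or a
# column_mapping entry.
def answer_length(*, response: str, **kwargs):
    return {"answer_length": len(response)}
```

In the row-level output, each returned key is typically surfaced under the evaluator's name (for example, an `outputs.answer_length.answer_length` column).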