@@ -50,12 +50,12 @@ If this is your first time running evaluations and logging it to your Azure AI F
```python
import os

- # Required environment variables
+ # Required environment variables:
endpoint = os.environ["PROJECT_ENDPOINT"]  # https://<account>.services.ai.azure.com/api/projects/<project>
model_endpoint = os.environ["MODEL_ENDPOINT"]  # https://<account>.services.ai.azure.com
model_api_key = os.environ["MODEL_API_KEY"]

- # Optional – reuse an existing dataset
+ # Optional: reuse an existing dataset.
dataset_name = os.environ.get("DATASET_NAME", "dataset-test")
dataset_version = os.environ.get("DATASET_VERSION", "1.0")
```
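If you want this sample to fail fast when one of these variables is missing, a small pre-flight check can run before any client is created. The sketch below only reuses the variable names shown above; the guard itself and its error message are illustrative, not part of the documented sample.

```python
import os

# Illustrative guard: verify the required variables before creating clients
# so a missing value surfaces immediately with a clear message.
required = ["PROJECT_ENDPOINT", "MODEL_ENDPOINT", "MODEL_API_KEY"]
missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Missing required environment variables: {', '.join(missing)}")
```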
@@ -67,7 +67,7 @@ If this is your first time running evaluations and logging it to your Azure AI F
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

- # Create the project client (Foundry project and credentials)
+ # Create the project client (Foundry project and credentials):
project_client = AIProjectClient(
    endpoint=endpoint,
    credential=DefaultAzureCredential(),
@@ -77,7 +77,7 @@ If this is your first time running evaluations and logging it to your Azure AI F
## <a name="uploading-evaluation-data"></a> Upload evaluation data

```python
- # Upload a local JSONL file (skip if you already have a Dataset registered)
+ # Upload a local JSONL file. Skip this step if you already have a dataset registered.
data_id = project_client.datasets.upload_file(
    name=dataset_name,
    version=dataset_version,
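# Illustrative sketch for the "already registered" path mentioned in the new
# comment: look up an existing dataset instead of uploading. It assumes the
# datasets operations group exposes get(name=..., version=...) and that the
# returned object carries an id; verify both against your installed SDK version.
existing_dataset = project_client.datasets.get(
    name=dataset_name,
    version=dataset_version,
)
data_id = existing_dataset.id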
@@ -97,7 +97,7 @@ from azure.ai.projects.models import (
    EvaluatorIds,
)

- # Built-in evaluator configurations
+ # Built-in evaluator configurations:
evaluators = {
    "relevance": EvaluatorConfiguration(
        id=EvaluatorIds.RELEVANCE.value,
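# Illustrative sketch of how the evaluators dictionary is commonly filled out.
# The init_params keyword, the HATE_UNFAIRNESS member of EvaluatorIds, and the
# "gpt-4o" deployment name are assumptions, not taken from this diff; check
# them against your SDK version before relying on them.
evaluators = {
    "relevance": EvaluatorConfiguration(
        id=EvaluatorIds.RELEVANCE.value,
        init_params={"deployment_name": "gpt-4o"},
    ),
    "hate_unfairness": EvaluatorConfiguration(
        id=EvaluatorIds.HATE_UNFAIRNESS.value,
        init_params={"azure_ai_project": endpoint},
    ),
}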
@@ -127,15 +127,15 @@ from azure.ai.projects.models import (
    InputDataset
)

- # Create an evaluation with the dataset and evaluators specified
+ # Create an evaluation with the dataset and evaluators specified.
evaluation = Evaluation(
    display_name="Cloud evaluation",
    description="Evaluation of dataset",
    data=InputDataset(id=data_id),
    evaluators=evaluators,
)

- # Run the evaluation
+ # Run the evaluation.
evaluation_response = project_client.evaluations.create(
    evaluation,
    headers={
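# Illustrative sketch: evaluations.create starts a run that completes
# asynchronously, so a simple polling loop can wait for the result. The
# get(name) call, the status attribute, and the terminal status strings are
# assumptions about the evaluations client; confirm them for your SDK version.
import time

while True:
    run = project_client.evaluations.get(evaluation_response.name)
    print(f"Evaluation status: {run.status}")
    if run.status in ("Completed", "Failed", "Canceled"):
        break
    time.sleep(30)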
@@ -162,26 +162,26 @@ from azure.ai.ml import MLClient
from azure.ai.ml.entities import Model
from promptflow.client import PFClient

- # Define ml_client to register custom evaluator
+ # Define ml_client to register the custom evaluator.
ml_client = MLClient(
    subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
    resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
    workspace_name=os.environ["AZURE_PROJECT_NAME"],
    credential=DefaultAzureCredential()
)

- # Load evaluator from module
+ # Load the evaluator from the module.
from answer_len.answer_length import AnswerLengthEvaluator

- # Then we convert it to evaluation flow and save it locally
+ # Convert it to an evaluation flow and save it locally.
pf_client = PFClient()
local_path = "answer_len_local"
pf_client.flows.save(entry=AnswerLengthEvaluator, path=local_path)

- # Specify evaluator name to appear in the Evaluator library
+ # Specify the evaluator name that appears in the Evaluator library.
evaluator_name = "AnswerLenEvaluator"

- # Finally register the evaluator to the Evaluator library
+ # Register the evaluator to the Evaluator library.
custom_evaluator = Model(
    path=local_path,
    name=evaluator_name,
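# Illustrative sketch of how this registration step typically finishes,
# continuing from the names defined above. The ml_client.evaluators
# operations group is an assumption about recent azure-ai-ml releases, and
# the description string is a placeholder.
custom_evaluator = Model(
    path=local_path,
    name=evaluator_name,
    description="Custom evaluator measuring answer length.",
)
registered_evaluator = ml_client.evaluators.create_or_update(custom_evaluator)
print("Registered evaluator id:", registered_evaluator.id)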
@@ -201,10 +201,10 @@ After you register your custom evaluator to your Azure AI project, you can view
Follow the example to register a custom `FriendlinessEvaluator` built as described in [Prompt-based evaluators](../../concepts/evaluation-evaluators/custom-evaluators.md#prompt-based-evaluators):

```python
- # Import your prompt-based custom evaluator
+ # Import your prompt-based custom evaluator.
from friendliness.friend import FriendlinessEvaluator

- # Define your deployment
+ # Define your deployment.
model_config = dict(
    azure_endpoint=os.environ.get("AZURE_ENDPOINT"),
    azure_deployment=os.environ.get("AZURE_DEPLOYMENT_NAME"),
@@ -213,23 +213,23 @@ model_config = dict(
    type="azure_openai"
)

- # Define ml_client to register custom evaluator
+ # Define ml_client to register the custom evaluator.
ml_client = MLClient(
    subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
    resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
    workspace_name=os.environ["AZURE_PROJECT_NAME"],
    credential=DefaultAzureCredential()
)

- # # Convert evaluator to evaluation flow and save it locally
+ # Convert the evaluator to an evaluation flow and save it locally.
local_path = "friendliness_local"
pf_client = PFClient()
pf_client.flows.save(entry=FriendlinessEvaluator, path=local_path)

- # Specify evaluator name to appear in the Evaluator library
+ # Specify the evaluator name that appears in the Evaluator library.
evaluator_name = "FriendlinessEvaluator"

- # Register the evaluator to the Evaluator library
+ # Register the evaluator to the Evaluator library.
custom_evaluator = Model(
    path=local_path,
    name=evaluator_name,