
Commit b91f46d

Update cloud-evaluation.md
1 parent cff114a commit b91f46d

1 file changed: 18 additions and 18 deletions

articles/ai-foundry/how-to/develop/cloud-evaluation.md

Lines changed: 18 additions & 18 deletions
@@ -50,12 +50,12 @@ If this is your first time running evaluations and logging it to your Azure AI F
 ```python
 import os
 
-# Required environment variables
+# Required environment variables:
 endpoint = os.environ["PROJECT_ENDPOINT"] # https://<account>.services.ai.azure.com/api/projects/<project>
 model_endpoint = os.environ["MODEL_ENDPOINT"] # https://<account>.services.ai.azure.com
 model_api_key = os.environ["MODEL_API_KEY"]
 
-# Optionalreuse an existing dataset
+# Optional: reuse an existing dataset.
 dataset_name = os.environ.get("DATASET_NAME", "dataset-test")
 dataset_version = os.environ.get("DATASET_VERSION", "1.0")
 ```
@@ -67,7 +67,7 @@ If this is your first time running evaluations and logging it to your Azure AI F
 from azure.identity import DefaultAzureCredential
 from azure.ai.projects import AIProjectClient
 
-# Create the project client (Foundry project and credentials)
+# Create the project client (Foundry project and credentials):
 project_client = AIProjectClient(
     endpoint=endpoint,
     credential=DefaultAzureCredential(),
@@ -77,7 +77,7 @@ If this is your first time running evaluations and logging it to your Azure AI F
 ## <a name = "uploading-evaluation-data"></a> Upload evaluation data
 
 ```python
-# Upload a local JSONL file (skip if you already have a Dataset registered)
+# Upload a local JSONL file. Skip this step if you already have a dataset registered.
 data_id = project_client.datasets.upload_file(
     name=dataset_name,
     version=dataset_version,
@@ -97,7 +97,7 @@ from azure.ai.projects.models import (
     EvaluatorIds,
 )
 
-# Built-in evaluator configurations
+# Built-in evaluator configurations:
 evaluators = {
     "relevance": EvaluatorConfiguration(
         id=EvaluatorIds.RELEVANCE.value,
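The hunk ends inside the `evaluators` dictionary, so the closing of each entry isn't visible. For orientation, a sketch of one complete entry, assuming `EvaluatorConfiguration` accepts an `init_params` mapping that names the judge model deployment (an assumption about the SDK surface; the deployment name is a placeholder):

```python
from azure.ai.projects.models import EvaluatorConfiguration, EvaluatorIds

# Hypothetical complete entry; init_params and the deployment name are
# assumptions, not taken from this commit.
evaluators = {
    "relevance": EvaluatorConfiguration(
        id=EvaluatorIds.RELEVANCE.value,
        init_params={"deployment_name": "my-gpt-deployment"},
    ),
}
```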
@@ -127,15 +127,15 @@ from azure.ai.projects.models import (
     InputDataset
 )
 
-# Create an evaluation with the dataset and evaluators specified
+# Create an evaluation with the dataset and evaluators specified.
 evaluation = Evaluation(
     display_name="Cloud evaluation",
     description="Evaluation of dataset",
     data=InputDataset(id=data_id),
     evaluators=evaluators,
 )
 
-# Run the evaluation
+# Run the evaluation.
 evaluation_response = project_client.evaluations.create(
     evaluation,
     headers={
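`evaluations.create(...)` starts the run in the cloud; the diff doesn't show how to check on it. A minimal sketch, assuming the client exposes a `get` method and the returned object carries `name` and `status` attributes (both are assumptions about the SDK, not shown in this commit):

```python
# Hypothetical status check; the method and attribute names are assumptions.
get_response = project_client.evaluations.get(evaluation_response.name)
print("Evaluation:", get_response.name)
print("Status:", get_response.status)  # for example, Running or Completed
```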
@@ -162,26 +162,26 @@ from azure.ai.ml import MLClient
 from azure.ai.ml.entities import Model
 from promptflow.client import PFClient
 
-# Define ml_client to register custom evaluator
+# Define ml_client to register the custom evaluator.
 ml_client = MLClient(
     subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
     resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
     workspace_name=os.environ["AZURE_PROJECT_NAME"],
     credential=DefaultAzureCredential()
 )
 
-# Load evaluator from module
+# Load the evaluator from the module.
 from answer_len.answer_length import AnswerLengthEvaluator
 
-# Then we convert it to evaluation flow and save it locally
+# Convert it to an evaluation flow, and save it locally.
 pf_client = PFClient()
 local_path = "answer_len_local"
 pf_client.flows.save(entry=AnswerLengthEvaluator, path=local_path)
 
-# Specify evaluator name to appear in the Evaluator library
+# Specify the evaluator name that appears in the Evaluator library.
 evaluator_name = "AnswerLenEvaluator"
 
-# Finally register the evaluator to the Evaluator library
+# Register the evaluator to the Evaluator library.
 custom_evaluator = Model(
     path=local_path,
     name=evaluator_name,
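This hunk stops inside the `Model(...)` constructor, so the registration call itself falls outside the diff. A sketch of how the step is typically finished, assuming `Model` also takes a `description` and that `ml_client.evaluators.create_or_update` performs the registration (both assumptions about code elsewhere in the article, not part of this commit):

```python
# Hypothetical completion of the registration; the description text and the
# create_or_update call are assumptions, not part of this commit.
custom_evaluator = Model(
    path=local_path,
    name=evaluator_name,
    description="Evaluator that reports the length of an answer.",
)
registered_evaluator = ml_client.evaluators.create_or_update(custom_evaluator)
print("Registered evaluator id:", registered_evaluator.id)
```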
@@ -201,10 +201,10 @@ After you register your custom evaluator to your Azure AI project, you can view
 Follow the example to register a custom `FriendlinessEvaluator` built as described in [Prompt-based evaluators](../../concepts/evaluation-evaluators/custom-evaluators.md#prompt-based-evaluators):
 
 ```python
-# Import your prompt-based custom evaluator
+# Import your prompt-based custom evaluator.
 from friendliness.friend import FriendlinessEvaluator
 
-# Define your deployment
+# Define your deployment.
 model_config = dict(
     azure_endpoint=os.environ.get("AZURE_ENDPOINT"),
     azure_deployment=os.environ.get("AZURE_DEPLOYMENT_NAME"),
@@ -213,23 +213,23 @@ model_config = dict(
     type="azure_openai"
 )
 
-# Define ml_client to register custom evaluator
+# Define ml_client to register the custom evaluator.
 ml_client = MLClient(
     subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
     resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
     workspace_name=os.environ["AZURE_PROJECT_NAME"],
     credential=DefaultAzureCredential()
 )
 
-# # Convert evaluator to evaluation flow and save it locally
+# # Convert the evaluator to evaluation flow and save it locally.
 local_path = "friendliness_local"
 pf_client = PFClient()
 pf_client.flows.save(entry=FriendlinessEvaluator, path=local_path)
 
-# Specify evaluator name to appear in the Evaluator library
+# Specify the evaluator name that appears in the Evaluator library.
 evaluator_name = "FriendlinessEvaluator"
 
-# Register the evaluator to the Evaluator library
+# Register the evaluator to the Evaluator library.
 custom_evaluator = Model(
     path=local_path,
     name=evaluator_name,
