Commit 559449f

update
1 parent 21347ae commit 559449f

File tree

articles/ai-foundry/openai/includes/evaluation-python.md
articles/ai-foundry/openai/includes/evaluation-rest.md

2 files changed: 43 additions & 14 deletions


articles/ai-foundry/openai/includes/evaluation-python.md

Lines changed: 35 additions & 14 deletions
@@ -23,8 +23,11 @@ async def create_eval():
     response = await asyncio.to_thread(
         requests.post,
         f'{API_ENDPOINT}/openai/v1/evals',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY},
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        },
         json={
             'name': 'My Evaluation',
             'data_source_config': {
@@ -88,8 +91,11 @@ import json
     response = await asyncio.to_thread(
         requests.post,
         f'{API_ENDPOINT}/openai/v1/evals/{eval_id}/runs',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY},
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        },
         json={
             "name": "No sample",
             "metadata": {
@@ -120,8 +126,11 @@ async def update_eval():
     response = await asyncio.to_thread(
         requests.post,
         f'{API_ENDPOINT}/openai/v1/evals/{eval_id}',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY},
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        },
         json={
             "name": "Updated Eval Name",
             "metadata": {
@@ -147,8 +156,11 @@ async def get_eval():
     response = await asyncio.to_thread(
         requests.get,
         f'{API_ENDPOINT}/openai/v1/evals/{eval_id}',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY})
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        })

     print(response.status_code)
     print(response.json())
@@ -167,8 +179,11 @@ async def get_eval_run():
     response = await asyncio.to_thread(
         requests.get,
         f'{API_ENDPOINT}/openai/v1/evals/eval_67fd95c864f08190817f0dff5f42f49e/runs/evalrun_67fe987a6c548190ba6f33f7cd89343d',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY})
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        })

     print(response.status_code)
     print(json.dumps(response.json(), indent=2))
@@ -200,8 +215,11 @@ async def get_eval_list():
     response = await asyncio.to_thread(
         requests.get,
         f'{API_ENDPOINT}/openai/v1/evals',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY})
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        })

     print(response.status_code)
     print(json.dumps(response.json(), indent=2))
@@ -220,8 +238,11 @@ async def get_eval_output_item_list():
     response = await asyncio.to_thread(
         requests.get,
         f'{API_ENDPOINT}/openai/v1/evals/eval_67fd95c864f08190817f0dff5f42f49e/runs/evalrun_67fe987a6c548190ba6f33f7cd89343d/output_items',
-        params={'api-version': f"{API_VERSION}"},
-        headers={'api-key': API_KEY})
+        params={'api-version': "preview"},
+        headers={
+            'api-key': API_KEY,
+            'aoai-evals': 'preview'
+        })

     print(response.status_code)
     print(json.dumps(response.json(), indent=2))
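
Taken together, the Python changes pin the query string to `api-version=preview` and add an `aoai-evals: preview` request header to every updated call. A minimal sketch of the resulting request pattern, assuming environment variables for the endpoint and key (the shared constants and the use of `os.environ` are illustrative, not part of the commit):

```python
import asyncio
import json
import os

import requests

# Assumed configuration; these variable names are illustrative only.
API_ENDPOINT = os.environ["AZURE_OPENAI_ENDPOINT"]
API_KEY = os.environ["AZURE_OPENAI_API_KEY"]

# Shared by every updated call in evaluation-python.md.
PARAMS = {"api-version": "preview"}
HEADERS = {
    "api-key": API_KEY,
    "aoai-evals": "preview",  # header introduced by this commit
}

async def get_eval_list():
    # requests is synchronous, so the examples run it in a worker thread.
    response = await asyncio.to_thread(
        requests.get,
        f"{API_ENDPOINT}/openai/v1/evals",
        params=PARAMS,
        headers=HEADERS,
    )
    print(response.status_code)
    print(json.dumps(response.json(), indent=2))

if __name__ == "__main__":
    asyncio.run(get_eval_list())
```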

articles/ai-foundry/openai/includes/evaluation-rest.md

Lines changed: 8 additions & 0 deletions
@@ -18,6 +18,7 @@ You can create an evaluation by specifying a data source config and the evaluati
 curl -X POST "$AZURE_OPENAI_ENDPOINT/openai/v1/evals?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY" \
+  -H "aoai-evals: preview" \
   -d '{
     "name": "Math Quiz",
     "data_source_config": {
@@ -56,6 +57,7 @@ You can add new evaluation runs to the evaluation job you had created in the pre
 curl -X POST "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 ### Update Existing Evaluation
@@ -64,6 +66,7 @@ curl -X POST "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs?api-version=
 curl -X POST "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 ## Evaluation Results
@@ -74,6 +77,7 @@ Once evaluation is complete, you can fetch the evaluation results for the evalua
 curl -X GET "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 ### Single Evaluation Run Result
@@ -84,6 +88,7 @@ Just like how you can create a single evaluation run under an existing evaluatio
 curl -X GET "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs/{run-id}?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 In addition to the parameters in the examples above, you can optionally add these parameters for more specific drill-downs into the evaluation results:
@@ -107,6 +112,7 @@ To see the list of all evaluation jobs that were created:
 curl -X GET "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 ### Output Details for a Run
@@ -117,6 +123,7 @@ You can view the individual outputs generated from the graders for a single eval
 curl -X GET "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs/{run-id}/output_items?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```

 If you have a particular output result you would like to see, you can specify the output item ID:
@@ -125,4 +132,5 @@ If you have a particular output result you would like to see, you can specify th
 curl -X GET "$AZURE_OPENAI_ENDPOINT/openai/v1/evals/{eval-id}/runs/{run-id}/output_items/{output-item-id}?api-version=preview" \
   -H "Content-Type: application/json" \
   -H "api-key: $AZURE_OPENAI_API_KEY"
+  -H "aoai-evals: preview" \
 ```
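
Each curl example above gains the same `aoai-evals: preview` header; the `api-version=preview` query string was already present in these calls. As a hedged illustration of the final request shape, here is a Python equivalent of the last call (the output-item drill-down). The placeholder IDs come from the doc; the variable names and the use of `os.environ` are assumptions:

```python
import os

import requests

endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
api_key = os.environ["AZURE_OPENAI_API_KEY"]

# Placeholders mirror the curl examples; substitute real IDs before running.
eval_id = "{eval-id}"
run_id = "{run-id}"
output_item_id = "{output-item-id}"

response = requests.get(
    f"{endpoint}/openai/v1/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}",
    params={"api-version": "preview"},
    headers={
        "api-key": api_key,
        "aoai-evals": "preview",  # the header this commit adds throughout
    },
)
print(response.status_code)
print(response.json())
```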
