Skip to content

Commit 505e884

Browse files
10-21 test updates (Azure#38595)
* 10-21 test updates
* uncomment
* fix parametrize
1 parent 6434497 commit 505e884

File tree

3 files changed

+110
-7
lines changed

3 files changed

+110
-7
lines changed

sdk/openai/azure-openai/tests/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121

2222
# for pytest.parametrize
23-
GA = "2024-06-01"
23+
GA = "2024-10-21"
2424
PREVIEW = "2024-10-01-preview"
2525
LATEST = PREVIEW
2626

sdk/openai/azure-openai/tests/test_chat_completions.py

Lines changed: 54 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,15 +80,15 @@ def test_chat_completion(self, client, api_type, api_version, **kwargs):
8080
@configure
8181
@pytest.mark.parametrize(
8282
"api_type, api_version",
83-
[(AZURE, GA), (AZURE, PREVIEW), (OPENAI, "v1")]
83+
[(GPT_4_AZURE, GA), (GPT_4_AZURE, PREVIEW), (OPENAI, "v1")]
8484
)
8585
def test_streamed_chat_completions(self, client, api_type, api_version, **kwargs):
8686
messages = [
8787
{"role": "system", "content": "You are a helpful assistant."},
8888
{"role": "user", "content": "How do I bake a chocolate cake?"}
8989
]
9090

91-
response = client.chat.completions.create(messages=messages, stream=True, **kwargs)
91+
response = client.chat.completions.create(messages=messages, stream=True, stream_options={"include_usage": True}, **kwargs)
9292

9393
for completion in response:
9494
# API versions after 2023-05-15 send an empty first completion with RAI
@@ -100,6 +100,10 @@ def test_streamed_chat_completions(self, client, api_type, api_version, **kwargs
100100
for c in completion.choices:
101101
assert c.index is not None
102102
assert c.delta is not None
103+
if completion.usage:
104+
assert completion.usage.completion_tokens is not None
105+
assert completion.usage.prompt_tokens is not None
106+
assert completion.usage.total_tokens == completion.usage.completion_tokens + completion.usage.prompt_tokens
103107

104108
@configure
105109
@pytest.mark.parametrize(
@@ -1164,7 +1168,7 @@ def test_chat_completion_logprobs(self, client, api_type, api_version, **kwargs)
11641168
assert logprob.bytes is not None
11651169

11661170
@configure
1167-
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, PREVIEW), (GPT_4_OPENAI, "v1")])
1171+
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, PREVIEW), (GPT_4_AZURE, GA), (GPT_4_OPENAI, "v1")])
11681172
def test_chat_completion_structured_outputs(self, client, api_type, api_version, **kwargs):
11691173

11701174
class Step(BaseModel):
@@ -1202,3 +1206,50 @@ class MathResponse(BaseModel):
12021206
assert step.explanation
12031207
assert step.output
12041208
assert completion.choices[0].message.parsed.final_answer
1209+
1210+
@configure
1211+
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, GA), (GPT_4_AZURE, PREVIEW), (GPT_4_OPENAI, "v1")])
1212+
def test_chat_completion_parallel_tool_calls_disable(self, client, api_type, api_version, **kwargs):
1213+
messages = [
1214+
{"role": "system", "content": "Don't make assumptions about what values to plug into tools. Ask for clarification if a user request is ambiguous."},
1215+
{"role": "user", "content": "What's the weather like today in Seattle and Los Angeles?"}
1216+
]
1217+
tools = [
1218+
{
1219+
"type": "function",
1220+
"function": {
1221+
"name": "get_current_weather",
1222+
"description": "Get the current weather in a given location",
1223+
"parameters": {
1224+
"type": "object",
1225+
"properties": {
1226+
"location": {
1227+
"type": "string",
1228+
"description": "The city and state, e.g. San Francisco, CA",
1229+
},
1230+
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
1231+
},
1232+
"required": ["location"],
1233+
},
1234+
}
1235+
}
1236+
]
1237+
1238+
completion = client.chat.completions.create(
1239+
messages=messages,
1240+
tools=tools,
1241+
parallel_tool_calls=False,
1242+
**kwargs
1243+
)
1244+
assert completion.id
1245+
assert completion.object == "chat.completion"
1246+
assert completion.model
1247+
assert completion.created
1248+
assert completion.usage.completion_tokens is not None
1249+
assert completion.usage.prompt_tokens is not None
1250+
assert completion.usage.total_tokens == completion.usage.completion_tokens + completion.usage.prompt_tokens
1251+
assert len(completion.choices) == 1
1252+
assert completion.choices[0].finish_reason
1253+
assert completion.choices[0].index is not None
1254+
assert completion.choices[0].message.role
1255+
assert len(completion.choices[0].message.tool_calls) == 1

sdk/openai/azure-openai/tests/test_chat_completions_async.py

Lines changed: 55 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,15 +83,15 @@ async def test_chat_completion(self, client_async, api_type, api_version, **kwar
8383
@pytest.mark.asyncio
8484
@pytest.mark.parametrize(
8585
"api_type, api_version",
86-
[(AZURE, GA), (AZURE, PREVIEW), (OPENAI, "v1")]
86+
[(GPT_4_AZURE, GA), (GPT_4_AZURE, PREVIEW), (OPENAI, "v1")]
8787
)
8888
async def test_streamed_chat_completions(self, client_async, api_type, api_version, **kwargs):
8989
messages = [
9090
{"role": "system", "content": "You are a helpful assistant."},
9191
{"role": "user", "content": "How do I bake a chocolate cake?"}
9292
]
9393

94-
response = await client_async.chat.completions.create(messages=messages, stream=True, **kwargs)
94+
response = await client_async.chat.completions.create(messages=messages, stream=True, stream_options={"include_usage": True}, **kwargs)
9595

9696
async for completion in response:
9797
# API versions after 2023-05-15 send an empty first completion with RAI
@@ -103,6 +103,10 @@ async def test_streamed_chat_completions(self, client_async, api_type, api_versi
103103
for c in completion.choices:
104104
assert c.index is not None
105105
assert c.delta is not None
106+
if completion.usage:
107+
assert completion.usage.completion_tokens is not None
108+
assert completion.usage.prompt_tokens is not None
109+
assert completion.usage.total_tokens == completion.usage.completion_tokens + completion.usage.prompt_tokens
106110

107111
@configure_async
108112
@pytest.mark.asyncio
@@ -1191,7 +1195,7 @@ async def test_chat_completion_logprobs(self, client_async, api_type, api_versio
11911195

11921196
@configure_async
11931197
@pytest.mark.asyncio
1194-
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, PREVIEW), (GPT_4_OPENAI, "v1")])
1198+
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, PREVIEW), (GPT_4_AZURE, GA), (GPT_4_OPENAI, "v1")])
11951199
async def test_chat_completion_structured_outputs(self, client_async, api_type, api_version, **kwargs):
11961200

11971201
class Step(BaseModel):
@@ -1228,3 +1232,51 @@ class MathResponse(BaseModel):
12281232
assert step.explanation
12291233
assert step.output
12301234
assert completion.choices[0].message.parsed.final_answer
1235+
1236+
@configure_async
1237+
@pytest.mark.asyncio
1238+
@pytest.mark.parametrize("api_type, api_version", [(GPT_4_AZURE, GA), (GPT_4_AZURE, PREVIEW), (GPT_4_OPENAI, "v1")])
1239+
async def test_chat_completion_parallel_tool_calls_disable(self, client_async, api_type, api_version, **kwargs):
1240+
messages = [
1241+
{"role": "system", "content": "Don't make assumptions about what values to plug into tools. Ask for clarification if a user request is ambiguous."},
1242+
{"role": "user", "content": "What's the weather like today in Seattle and Los Angeles?"}
1243+
]
1244+
tools = [
1245+
{
1246+
"type": "function",
1247+
"function": {
1248+
"name": "get_current_weather",
1249+
"description": "Get the current weather in a given location",
1250+
"parameters": {
1251+
"type": "object",
1252+
"properties": {
1253+
"location": {
1254+
"type": "string",
1255+
"description": "The city and state, e.g. San Francisco, CA",
1256+
},
1257+
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
1258+
},
1259+
"required": ["location"],
1260+
},
1261+
}
1262+
}
1263+
]
1264+
1265+
completion = await client_async.chat.completions.create(
1266+
messages=messages,
1267+
tools=tools,
1268+
parallel_tool_calls=False,
1269+
**kwargs
1270+
)
1271+
assert completion.id
1272+
assert completion.object == "chat.completion"
1273+
assert completion.model
1274+
assert completion.created
1275+
assert completion.usage.completion_tokens is not None
1276+
assert completion.usage.prompt_tokens is not None
1277+
assert completion.usage.total_tokens == completion.usage.completion_tokens + completion.usage.prompt_tokens
1278+
assert len(completion.choices) == 1
1279+
assert completion.choices[0].finish_reason
1280+
assert completion.choices[0].index is not None
1281+
assert completion.choices[0].message.role
1282+
assert len(completion.choices[0].message.tool_calls) == 1

0 commit comments

Comments (0)