Skip to content

Commit 15a5cfa

Browse files
committed
add tests back
1 parent a061e82 commit 15a5cfa

File tree

1 file changed

+176
-1
lines changed

1 file changed

+176
-1
lines changed

tests/lib/chat/test_completions.py

Lines changed: 176 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import os
44
import json
55
from enum import Enum
6-
from typing import Any, List, Callable, Optional
6+
from typing import Any, List, Callable, Optional, Awaitable
77
from typing_extensions import Literal, TypeVar
88

99
import httpx
@@ -773,6 +773,139 @@ def test_parse_non_strict_tools(client: OpenAI) -> None:
773773
)
774774

775775

776+
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_raw_response(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """`.with_raw_response.parse()` still performs pydantic parsing of the body.

    Verifies that the raw-response wrapper forwards the helper-method header
    and that `response.parse()` yields a `ParsedChatCompletion` whose message
    content has been parsed into the `Location` model.
    """

    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    response = _make_snapshot_request(
        lambda c: c.beta.chat.completions.with_raw_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT", "object": "chat.completion", "created": 1727389540, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        mock_client=client,
        respx_mock=respx_mock,
    )
    # The SDK tags helper-driven requests so the backend can attribute them.
    assert response.http_request.headers.get("x-stainless-helper-method") == "beta.chat.completions.parse"

    completion = response.parse()
    message = completion.choices[0].message
    assert message.parsed is not None
    assert isinstance(message.parsed.city, str)
    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                content='{"city":"San Francisco","temperature":58,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=58.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=[]
            )
        )
    ],
    created=1727389540,
    id='chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0),
        prompt_tokens=79,
        total_tokens=93
    )
)
"""
    )
839+
840+
841+
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_async_parse_pydantic_raw_response(
    async_client: AsyncOpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Async variant: `.with_raw_response.parse()` parses into the pydantic model.

    Mirrors `test_parse_pydantic_raw_response` but drives the request through
    `AsyncOpenAI` via `_make_async_snapshot_request`.
    """

    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    response = await _make_async_snapshot_request(
        lambda c: c.beta.chat.completions.with_raw_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq", "object": "chat.completion", "created": 1727389532, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        mock_client=async_client,
        respx_mock=respx_mock,
    )
    # The SDK tags helper-driven requests so the backend can attribute them.
    assert response.http_request.headers.get("x-stainless-helper-method") == "beta.chat.completions.parse"

    completion = response.parse()
    message = completion.choices[0].message
    assert message.parsed is not None
    assert isinstance(message.parsed.city, str)
    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                content='{"city":"San Francisco","temperature":65,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=65.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=[]
            )
        )
    ],
    created=1727389532,
    id='chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0),
        prompt_tokens=79,
        total_tokens=93
    )
)
"""
    )
907+
908+
776909
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
777910
def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
778911
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
@@ -824,3 +957,45 @@ def _on_response(response: httpx.Response) -> None:
824957
client.close()
825958

826959
return result
960+
961+
962+
async def _make_async_snapshot_request(
    func: Callable[[AsyncOpenAI], Awaitable[_T]],
    *,
    content_snapshot: Any,
    respx_mock: MockRouter,
    mock_client: AsyncOpenAI,
) -> _T:
    """Invoke *func* with either a mocked or a live `AsyncOpenAI` client.

    By default the respx mock serves the stored snapshot body for
    ``POST /chat/completions``. When the ``OPENAI_LIVE=1`` environment
    variable is set, a real client is constructed instead and a response
    hook compares (and, in snapshot-update mode, refreshes) the inline
    snapshot against the live response body.
    """
    is_live = os.environ.get("OPENAI_LIVE") == "1"

    if not is_live:
        # Replay the recorded body from the inline snapshot.
        respx_mock.post("/chat/completions").mock(
            return_value=httpx.Response(
                200,
                content=content_snapshot._old_value,
                headers={"content-type": "application/json"},
            )
        )
        client = mock_client
    else:

        async def _on_response(response: httpx.Response) -> None:
            # update the content snapshot
            assert json.dumps(json.loads(await response.aread())) == content_snapshot

        # Disable request interception so the call reaches the real API.
        respx_mock.stop()

        client = AsyncOpenAI(
            http_client=httpx.AsyncClient(
                event_hooks={
                    "response": [_on_response],
                }
            )
        )

    result = await func(client)

    if is_live:
        # Only close the client we created here; the fixture owns mock_client.
        await client.close()

    return result

0 commit comments

Comments
 (0)