diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 804a6039aa..5d9ceab581 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.99.7"
+  ".": "1.99.8"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 74d0da964a..33e0e8e948 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
 # Changelog
 
+## 1.99.8 (2025-08-11)
+
+Full Changelog: [v1.99.7...v1.99.8](https://github.com/openai/openai-python/compare/v1.99.7...v1.99.8)
+
+### Bug Fixes
+
+* **internal/tests:** correct snapshot update comment ([2784a7a](https://github.com/openai/openai-python/commit/2784a7a7da24ddba74b5717f07d67546864472b9))
+* **types:** revert ChatCompletionMessageToolCallUnion breaking change ([ba54e03](https://github.com/openai/openai-python/commit/ba54e03bc2d21825d891685bf3bad4a9253cbeb0))
+
+
+### Chores
+
+* **internal/tests:** add inline snapshot format command ([8107db8](https://github.com/openai/openai-python/commit/8107db8ff738baa65fe4cf2f2d7f1acd29219c78))
+* **internal:** fix formatting ([f03a03d](https://github.com/openai/openai-python/commit/f03a03de8c84740209d021598ff8bf56b6d3c684))
+* **tests:** add responses output_text test ([971347b](https://github.com/openai/openai-python/commit/971347b3a05f79c51abd11c86b382ca73c28cefb))
+
+
+### Refactors
+
+* **tests:** share snapshot utils ([791c567](https://github.com/openai/openai-python/commit/791c567cd87fb8d587965773b1da0404c7848c68))
+
 ## 1.99.7 (2025-08-11)
 
 Full Changelog: [v1.99.6...v1.99.7](https://github.com/openai/openai-python/compare/v1.99.6...v1.99.7)
diff --git a/pyproject.toml b/pyproject.toml
index d58b9b1eb2..b4a7d01a2b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.99.7"
+version = "1.99.8"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -150,6 +150,9 @@ filterwarnings = [
   "error"
 ]
 
+[tool.inline-snapshot]
+format-command="ruff format --stdin-filename {filename}"
+
 [tool.pyright]
 # this enables practically every flag given by pyright.
 # there are a couple of flags that are still disabled by
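Note on the `[tool.inline-snapshot]` addition above: inline-snapshot rewrites `snapshot(...)` literals in test files when run with `--inline-snapshot=fix`, and the `format-command` pipes each rewritten file through `ruff format` so the regenerated literals match project style. A minimal sketch of the workflow this config supports (the test below is hypothetical, not from this repo):

```python
# Hypothetical test illustrating the inline-snapshot workflow: running
# `pytest --inline-snapshot=fix` fills in (or updates) the snapshot() literal
# with the observed value, after which the configured format-command
# re-formats the modified file with ruff.
from inline_snapshot import snapshot


def test_title_case() -> None:
    # On the first --inline-snapshot=fix run, an empty snapshot() here would
    # be rewritten in place to snapshot("Hello World").
    assert "hello world".title() == snapshot("Hello World")
```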
__title__ = "openai" -__version__ = "1.99.7" # x-release-please-version +__version__ = "1.99.8" # x-release-please-version diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 2aecaf7d0c..50bdac7c65 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -31,7 +31,10 @@ from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam -from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion +from .chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion, +) from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index df687b19bd..845e639089 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -7,9 +7,11 @@ from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall -__all__ = [ "Function", "ChatCompletionMessageToolCallUnion"] +__all__ = ["Function", "ChatCompletionMessageToolCallUnion"] ChatCompletionMessageToolCallUnion: TypeAlias = Annotated[ Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], PropertyInfo(discriminator="type"), ] + +ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageToolCallUnion diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index d0bd14ce9e..f04a0e3782 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -1,12 +1,9 @@ from __future__ import annotations -import os -import json from enum import Enum -from typing import Any, List, Callable, Optional, Awaitable +from typing import List, Optional from typing_extensions import Literal, TypeVar -import httpx import pytest from respx import MockRouter from pydantic import Field, BaseModel @@ -17,8 +14,9 @@ from openai._utils import assert_signatures_in_sync from openai._compat import PYDANTIC_V2 -from ._utils import print_obj, get_snapshot_value +from ..utils import print_obj from ...conftest import base_url +from ..snapshots import make_snapshot_request, make_async_snapshot_request from ..schema_types.query import Query _T = TypeVar("_T") @@ -27,12 +25,12 @@ # # you can update them with # -# `OPENAI_LIVE=1 pytest --inline-snapshot=fix` +# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""` @pytest.mark.respx(base_url=base_url) def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: 
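The `ChatCompletionMessageToolCall` re-export above is the backwards-compatibility fix called out in the changelog: the name is restored as an alias for the discriminated union, so code annotated against the old type keeps type-checking and still narrows on the `type` field. A rough sketch of the call-site pattern this preserves (the `describe` helper is illustrative, not part of the SDK):

```python
# Illustrative only: annotations against ChatCompletionMessageToolCall keep
# working because the name is now a TypeAlias for the union type.
from openai.types.chat import ChatCompletionMessageToolCall


def describe(tool_call: ChatCompletionMessageToolCall) -> str:
    if tool_call.type == "function":
        # narrowed to ChatCompletionMessageFunctionToolCall
        return f"{tool_call.function.name}({tool_call.function.arguments})"
    # otherwise a ChatCompletionMessageCustomToolCall
    return f"custom tool call {tool_call.id}"
```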
diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py
index d0bd14ce9e..f04a0e3782 100644
--- a/tests/lib/chat/test_completions.py
+++ b/tests/lib/chat/test_completions.py
@@ -1,12 +1,9 @@
 from __future__ import annotations
 
-import os
-import json
 from enum import Enum
-from typing import Any, List, Callable, Optional, Awaitable
+from typing import List, Optional
 from typing_extensions import Literal, TypeVar
 
-import httpx
 import pytest
 from respx import MockRouter
 from pydantic import Field, BaseModel
@@ -17,8 +14,9 @@
 from openai._utils import assert_signatures_in_sync
 from openai._compat import PYDANTIC_V2
 
-from ._utils import print_obj, get_snapshot_value
+from ..utils import print_obj
 from ...conftest import base_url
+from ..snapshots import make_snapshot_request, make_async_snapshot_request
 from ..schema_types.query import Query
 
 _T = TypeVar("_T")
@@ -27,12 +25,12 @@
 #
 # you can update them with
 #
-# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`
 
 
 @pytest.mark.respx(base_url=base_url)
 def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -45,6 +43,7 @@ def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pyte
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvaueLEMLNYbT8YzpJxsmiQ6HSY", "object": "chat.completion", "created": 1727346142, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "I\'m unable to provide real-time weather updates. To get the current weather in San Francisco, I recommend checking a reliable weather website or app like the Weather Channel or a local news station.", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 14, "completion_tokens": 37, "total_tokens": 51, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -100,7 +99,7 @@ class Location(BaseModel):
         temperature: float
         units: Literal["c", "f"]
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -114,6 +113,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvbtVnTu5DeC4EFnRYj8mtfOM99", "object": "chat.completion", "created": 1727346143, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -170,7 +170,7 @@ class Location(BaseModel):
         temperature: float
         units: Optional[Literal["c", "f"]] = None
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -184,6 +184,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvcC8grKYsRkSoMp9CCAhbXAd0b", "object": "chat.completion", "created": 1727346144, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 88, "completion_tokens": 14, "total_tokens": 102, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -247,7 +248,7 @@ class ColorDetection(BaseModel):
     if not PYDANTIC_V2:
         ColorDetection.update_forward_refs(**locals())  # type: ignore
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -258,6 +259,7 @@ class ColorDetection(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvjIatz0zrZu50gRbMtlp0asZpz", "object": "chat.completion", "created": 1727346151, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"color\\":\\"red\\",\\"hex_color_code\\":\\"#FF0000\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 109, "completion_tokens": 14, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -292,7 +294,7 @@ class Location(BaseModel):
         temperature: float
         units: Literal["c", "f"]
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -307,6 +309,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvp8qzboW92q8ONDF4DPHlI7ckC", "object": "chat.completion", "created": 1727346157, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":64,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 1, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 2, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":63.0,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 44, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -375,7 +378,7 @@ class CalendarEvent:
         date: str
         participants: List[str]
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -387,6 +390,7 @@ class CalendarEvent:
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8", "object": "chat.completion", "created": 1727346158, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"name\\":\\"Science Fair\\",\\"date\\":\\"Friday\\",\\"participants\\":[\\"Alice\\",\\"Bob\\"]}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 92, "completion_tokens": 17, "total_tokens": 109, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -436,7 +440,7 @@ class CalendarEvent:
 
 @pytest.mark.respx(base_url=base_url)
 def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -451,6 +455,7 @@ def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, m
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvtNiaTNUF6OymZUnEFc9lPq9p1", "object": "chat.completion", "created": 1727346161, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_NKpApJybW1MzOjZO2FzwYw0d", "type": "function", "function": {"name": "Query", "arguments": "{\\"name\\":\\"May 2022 Fulfilled Orders Not Delivered on Time\\",\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\",\\"shipped_at\\",\\"ordered_at\\",\\"canceled_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\">=\\",\\"value\\":\\"2022-05-01\\"},{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"<=\\",\\"value\\":\\"2022-05-31\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 512, "completion_tokens": 132, "total_tokens": 644, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -521,7 +526,7 @@ class Location(BaseModel):
         units: Literal["c", "f"]
 
     with pytest.raises(openai.LengthFinishReasonError):
-        _make_snapshot_request(
+        make_snapshot_request(
             lambda c: c.chat.completions.parse(
                 model="gpt-4o-2024-08-06",
                 messages=[
@@ -536,6 +541,7 @@ class Location(BaseModel):
             content_snapshot=snapshot(
                '{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
             ),
+            path="/chat/completions",
             mock_client=client,
             respx_mock=respx_mock,
         )
@@ -548,7 +554,7 @@ class Location(BaseModel):
         temperature: float
         units: Literal["c", "f"]
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -562,6 +568,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvwoKVWPQj2UPlAcAKM7s40GsRx", "object": "chat.completion", "created": 1727346164, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "refusal": "I\'m very sorry, but I can\'t assist with that."}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 12, "total_tokens": 91, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -596,7 +603,7 @@ class GetWeatherArgs(BaseModel):
         country: str
         units: Literal["c", "f"] = "c"
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -612,6 +619,7 @@ class GetWeatherArgs(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvx6Z4dchiW2nya1N8KMsHFrQRE", "object": "chat.completion", "created": 1727346165, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_Y6qJ7ofLgOrBnMD5WbVAeiRV", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\":\\"Edinburgh\\",\\"country\\":\\"UK\\",\\"units\\":\\"c\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 76, "completion_tokens": 24, "total_tokens": 100, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_e45dabd248"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -662,7 +670,7 @@ class GetStockPrice(BaseModel):
         ticker: str
         exchange: str
 
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -685,6 +693,7 @@ class GetStockPrice(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvyvfNWKcl7Ohqos4UFrmMs1v4C", "object": "chat.completion", "created": 1727346166, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_fdNz3vOBKYgOIpMdWotB9MjY", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\": \\"Edinburgh\\", \\"country\\": \\"GB\\", \\"units\\": \\"c\\"}"}}, {"id": "call_h1DWI1POMJLb0KwIyQHWXD4p", "type": "function", "function": {"name": "get_stock_price", "arguments": "{\\"ticker\\": \\"AAPL\\", \\"exchange\\": \\"NASDAQ\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 149, "completion_tokens": 60, "total_tokens": 209, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -733,7 +742,7 @@ class GetStockPrice(BaseModel):
 
 @pytest.mark.respx(base_url=base_url)
 def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
-    completion = _make_snapshot_request(
+    completion = make_snapshot_request(
         lambda c: c.chat.completions.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -767,6 +776,7 @@ def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch:
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvzdvCI6RaIkiEFNjqGXCSYnlzf", "object": "chat.completion", "created": 1727346167, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_CUdUoJpsWWVdxXntucvnol1M", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\":\\"San Francisco\\",\\"state\\":\\"CA\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 48, "completion_tokens": 19, "total_tokens": 67, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -830,7 +840,7 @@ class Location(BaseModel):
         temperature: float
         units: Literal["c", "f"]
 
-    response = _make_snapshot_request(
+    response = make_snapshot_request(
         lambda c: c.chat.completions.with_raw_response.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -844,6 +854,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT", "object": "chat.completion", "created": 1727389540, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=client,
         respx_mock=respx_mock,
     )
@@ -906,7 +917,7 @@ class Location(BaseModel):
         temperature: float
         units: Literal["c", "f"]
 
-    response = await _make_async_snapshot_request(
+    response = await make_async_snapshot_request(
         lambda c: c.chat.completions.with_raw_response.parse(
             model="gpt-4o-2024-08-06",
             messages=[
@@ -920,6 +931,7 @@ class Location(BaseModel):
         content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq", "object": "chat.completion", "created": 1727389532, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
         ),
+        path="/chat/completions",
         mock_client=async_client,
         respx_mock=respx_mock,
     )
@@ -981,87 +993,3 @@ def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpe
         checking_client.chat.completions.parse,
         exclude_params={"response_format", "stream"},
     )
-
-
-def _make_snapshot_request(
-    func: Callable[[OpenAI], _T],
-    *,
-    content_snapshot: Any,
-    respx_mock: MockRouter,
-    mock_client: OpenAI,
-) -> _T:
-    live = os.environ.get("OPENAI_LIVE") == "1"
-    if live:
-
-        def _on_response(response: httpx.Response) -> None:
-            # update the content snapshot
-            assert json.dumps(json.loads(response.read())) == content_snapshot
-
-        respx_mock.stop()
-
-        client = OpenAI(
-            http_client=httpx.Client(
-                event_hooks={
-                    "response": [_on_response],
-                }
-            )
-        )
-    else:
-        respx_mock.post("/chat/completions").mock(
-            return_value=httpx.Response(
-                200,
-                content=get_snapshot_value(content_snapshot),
-                headers={"content-type": "application/json"},
-            )
-        )
-
-        client = mock_client
-
-    result = func(client)
-
-    if live:
-        client.close()
-
-    return result
-
-
-async def _make_async_snapshot_request(
-    func: Callable[[AsyncOpenAI], Awaitable[_T]],
-    *,
-    content_snapshot: Any,
-    respx_mock: MockRouter,
-    mock_client: AsyncOpenAI,
-) -> _T:
-    live = os.environ.get("OPENAI_LIVE") == "1"
-    if live:
-
-        async def _on_response(response: httpx.Response) -> None:
-            # update the content snapshot
-            assert json.dumps(json.loads(await response.aread())) == content_snapshot
-
-        respx_mock.stop()
-
-        client = AsyncOpenAI(
-            http_client=httpx.AsyncClient(
-                event_hooks={
-                    "response": [_on_response],
-                }
-            )
-        )
-    else:
-        respx_mock.post("/chat/completions").mock(
-            return_value=httpx.Response(
-                200,
-                content=get_snapshot_value(content_snapshot),
-                headers={"content-type": "application/json"},
-            )
-        )
-
-        client = mock_client
-
-    result = await func(client)
-
-    if live:
-        await client.close()
-
-    return result
diff --git a/tests/lib/chat/test_completions_streaming.py b/tests/lib/chat/test_completions_streaming.py
index 1daa98c6a0..fa17f67177 100644
--- a/tests/lib/chat/test_completions_streaming.py
+++ b/tests/lib/chat/test_completions_streaming.py
@@ -30,7 +30,7 @@
 )
 from openai.lib._parsing._completions import ResponseFormatT
 
-from ._utils import print_obj, get_snapshot_value
+from ..utils import print_obj, get_snapshot_value
 from ...conftest import base_url
 
 _T = TypeVar("_T")
@@ -39,7 +39,7 @@
 #
 # you can update them with
 #
-# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`
 
 
 @pytest.mark.respx(base_url=base_url)
diff --git a/tests/lib/responses/__init__.py b/tests/lib/responses/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py
new file mode 100644
index 0000000000..8ce3462e76
--- /dev/null
+++ b/tests/lib/responses/test_responses.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+from typing_extensions import TypeVar
+
+import pytest
+from respx import MockRouter
+from inline_snapshot import snapshot
+
+from openai import OpenAI
+
+from ...conftest import base_url
+from ..snapshots import make_snapshot_request
+
+_T = TypeVar("_T")
+
+# all the snapshots in this file are auto-generated from the live API
+#
+# you can update them with
+#
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
+    response = make_snapshot_request(
+        lambda c: c.responses.create(
+            model="gpt-4o-mini",
+            input="What's the weather like in SF?",
+        ),
+        content_snapshot=snapshot(
+            '{"id": "resp_689a0b2545288193953c892439b42e2800b2e36c65a1fd4b", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_689a0b2637b08193ac478e568f49e3f900b2e36c65a1fd4b", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": "I can\'t provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it\'s good to be prepared for variable weather!"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 14, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 50, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 64}, "user": null, "metadata": {}}'
+        ),
+        path="/responses",
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert response.output_text == snapshot(
+        "I can't provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it's good to be prepared for variable weather!"
+    )
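The `output_text` assertion in the new test exercises the SDK's aggregated-text convenience property. As a rough mental model (a sketch of the expected behavior, not the SDK's source), `output_text` concatenates the text of every `output_text` content part across the message items in `response.output`:

```python
# Sketch of what response.output_text is expected to produce for the mocked
# response above; assumes a `response` object shaped like the snapshot JSON.
from openai.types.responses import Response


def collect_output_text(response: Response) -> str:
    # Join the text of all output_text parts in message-type output items.
    return "".join(
        part.text
        for item in response.output
        if item.type == "message"
        for part in item.content
        if part.type == "output_text"
    )
```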
diff --git a/tests/lib/snapshots.py b/tests/lib/snapshots.py
new file mode 100644
index 0000000000..ed53edebcb
--- /dev/null
+++ b/tests/lib/snapshots.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import os
+import json
+from typing import Any, Callable, Awaitable
+from typing_extensions import TypeVar
+
+import httpx
+from respx import MockRouter
+
+from openai import OpenAI, AsyncOpenAI
+
+from .utils import get_snapshot_value
+
+_T = TypeVar("_T")
+
+
+def make_snapshot_request(
+    func: Callable[[OpenAI], _T],
+    *,
+    content_snapshot: Any,
+    respx_mock: MockRouter,
+    mock_client: OpenAI,
+    path: str,
+) -> _T:
+    live = os.environ.get("OPENAI_LIVE") == "1"
+    if live:
+
+        def _on_response(response: httpx.Response) -> None:
+            # update the content snapshot
+            assert json.dumps(json.loads(response.read())) == content_snapshot
+
+        respx_mock.stop()
+
+        client = OpenAI(
+            http_client=httpx.Client(
+                event_hooks={
+                    "response": [_on_response],
+                }
+            )
+        )
+    else:
+        respx_mock.post(path).mock(
+            return_value=httpx.Response(
+                200,
+                content=get_snapshot_value(content_snapshot),
+                headers={"content-type": "application/json"},
+            )
+        )
+
+        client = mock_client
+
+    result = func(client)
+
+    if live:
+        client.close()
+
+    return result
+
+
+async def make_async_snapshot_request(
+    func: Callable[[AsyncOpenAI], Awaitable[_T]],
+    *,
+    content_snapshot: Any,
+    respx_mock: MockRouter,
+    mock_client: AsyncOpenAI,
+    path: str,
+) -> _T:
+    live = os.environ.get("OPENAI_LIVE") == "1"
+    if live:
+
+        async def _on_response(response: httpx.Response) -> None:
+            # update the content snapshot
+            assert json.dumps(json.loads(await response.aread())) == content_snapshot
+
+        respx_mock.stop()
+
+        client = AsyncOpenAI(
+            http_client=httpx.AsyncClient(
+                event_hooks={
+                    "response": [_on_response],
+                }
+            )
+        )
+    else:
+        respx_mock.post(path).mock(
+            return_value=httpx.Response(
+                200,
+                content=get_snapshot_value(content_snapshot),
+                headers={"content-type": "application/json"},
+            )
+        )
+
+        client = mock_client
+
+    result = await func(client)
+
+    if live:
+        await client.close()
+
+    return result
diff --git a/tests/lib/chat/_utils.py b/tests/lib/utils.py
similarity index 98%
rename from tests/lib/chat/_utils.py
rename to tests/lib/utils.py
index 0cc1c99952..2129ee811a 100644
--- a/tests/lib/chat/_utils.py
+++ b/tests/lib/utils.py
@@ -7,7 +7,7 @@
 import pytest
 import pydantic
 
-from ...utils import rich_print_str
+from ..utils import rich_print_str
 
 ReprArgs: TypeAlias = "Iterable[tuple[str | None, Any]]"
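A note on the `OPENAI_LIVE=1` branch of the shared helpers in `tests/lib/snapshots.py` above: the response hook round-trips the body through `json.loads`/`json.dumps` so the snapshot comparison is insensitive to whitespace in the wire format, and comparing against `snapshot(...)` is what lets `--inline-snapshot=fix` record the canonical value. A small self-contained illustration of that normalization:

```python
# Demonstrates the normalization the snapshot helpers rely on: differently
# spaced but semantically identical JSON bodies compare equal after a
# loads/dumps round-trip (json.dumps uses ", " and ": " separators by default).
import json

raw_a = '{"id": "resp_123",   "object":"response"}'
raw_b = '{"id":"resp_123", "object": "response"}'
assert json.dumps(json.loads(raw_a)) == json.dumps(json.loads(raw_b))
```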