From e1bad015b8a2b98bfee955a24bc931347a58efc1 Mon Sep 17 00:00:00 2001
From: "Johan Stenberg (MSFT)"
Date: Wed, 3 Sep 2025 08:52:05 -0700
Subject: [PATCH 1/3] feat(client): support callable api_key (#2588)

Co-authored-by: Krista Pratico
---
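Reviewer note (below the `---` fold, so `git am` ignores it like the diffstat):
`api_key` may now be a zero-argument callable. The client stores it as
`_api_key_provider` and re-invokes it from `_prepare_options()` before each
request, so rotated credentials are picked up on retries. A minimal usage
sketch; `load_token` and `load_token_async` are hypothetical stand-ins for a
real credential source:

    from openai import OpenAI, AsyncOpenAI

    def load_token() -> str:
        # e.g. fetch a short-lived key from a vault (hypothetical)
        return "sk-..."

    client = OpenAI(api_key=load_token)  # sync clients take a () -> str callable

    async def load_token_async() -> str:
        # async clients take a () -> Awaitable[str] callable
        return "sk-..."

    async_client = AsyncOpenAI(api_key=load_token_async)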
 src/openai/_client.py                     |  47 ++++--
 src/openai/lib/azure.py                   |  24 +--
 .../resources/beta/realtime/realtime.py   |   2 +
 src/openai/resources/realtime/realtime.py |   2 +
 tests/test_client.py                      | 138 +++++++++++++++++-
 5 files changed, 188 insertions(+), 25 deletions(-)

diff --git a/src/openai/_client.py b/src/openai/_client.py
index fe5ebac42a..2be32fe13f 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -3,7 +3,7 @@
 from __future__ import annotations

 import os
-from typing import TYPE_CHECKING, Any, Union, Mapping
+from typing import TYPE_CHECKING, Any, Union, Mapping, Callable, Awaitable
 from typing_extensions import Self, override

 import httpx
@@ -25,6 +25,7 @@
     get_async_library,
 )
 from ._compat import cached_property
+from ._models import FinalRequestOptions
 from ._version import __version__
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import OpenAIError, APIStatusError
@@ -96,7 +97,7 @@ class OpenAI(SyncAPIClient):
     def __init__(
         self,
         *,
-        api_key: str | None = None,
+        api_key: str | None | Callable[[], str] = None,
         organization: str | None = None,
         project: str | None = None,
         webhook_secret: str | None = None,
@@ -134,7 +135,12 @@ def __init__(
             raise OpenAIError(
                 "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
             )
-        self.api_key = api_key
+        if callable(api_key):
+            self.api_key = ""
+            self._api_key_provider: Callable[[], str] | None = api_key
+        else:
+            self.api_key = api_key
+            self._api_key_provider = None

         if organization is None:
             organization = os.environ.get("OPENAI_ORG_ID")
@@ -295,6 +301,15 @@ def with_streaming_response(self) -> OpenAIWithStreamedResponse:
     def qs(self) -> Querystring:
         return Querystring(array_format="brackets")

+    def _refresh_api_key(self) -> None:
+        if self._api_key_provider:
+            self.api_key = self._api_key_provider()
+
+    @override
+    def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+        self._refresh_api_key()
+        return super()._prepare_options(options)
+
     @property
     @override
     def auth_headers(self) -> dict[str, str]:
@@ -318,7 +333,7 @@ def default_headers(self) -> dict[str, str | Omit]:
     def copy(
         self,
         *,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         organization: str | None = None,
         project: str | None = None,
         webhook_secret: str | None = None,
@@ -356,7 +371,7 @@ def copy(

         http_client = http_client or self._client
         return self.__class__(
-            api_key=api_key or self.api_key,
+            api_key=api_key or self._api_key_provider or self.api_key,
             organization=organization or self.organization,
             project=project or self.project,
             webhook_secret=webhook_secret or self.webhook_secret,
@@ -427,7 +442,7 @@ class AsyncOpenAI(AsyncAPIClient):
     def __init__(
         self,
         *,
-        api_key: str | None = None,
+        api_key: str | Callable[[], Awaitable[str]] | None = None,
        organization: str | None = None,
         project: str | None = None,
         webhook_secret: str | None = None,
@@ -465,7 +480,12 @@ def __init__(
             raise OpenAIError(
                 "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
             )
-        self.api_key = api_key
+        if callable(api_key):
+            self.api_key = ""
+            self._api_key_provider: Callable[[], Awaitable[str]] | None = api_key
+        else:
+            self.api_key = api_key
+            self._api_key_provider = None

         if organization is None:
             organization = os.environ.get("OPENAI_ORG_ID")
@@ -626,6 +646,15 @@ def with_streaming_response(self) -> AsyncOpenAIWithStreamedResponse:
     def qs(self) -> Querystring:
         return Querystring(array_format="brackets")

+    async def _refresh_api_key(self) -> None:
+        if self._api_key_provider:
+            self.api_key = await self._api_key_provider()
+
+    @override
+    async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
+        await self._refresh_api_key()
+        return await super()._prepare_options(options)
+
     @property
     @override
     def auth_headers(self) -> dict[str, str]:
@@ -649,7 +678,7 @@ def default_headers(self) -> dict[str, str | Omit]:
     def copy(
         self,
         *,
-        api_key: str | None = None,
+        api_key: str | Callable[[], Awaitable[str]] | None = None,
         organization: str | None = None,
         project: str | None = None,
         webhook_secret: str | None = None,
@@ -687,7 +716,7 @@ def copy(

         http_client = http_client or self._client
         return self.__class__(
-            api_key=api_key or self.api_key,
+            api_key=api_key or self._api_key_provider or self.api_key,
             organization=organization or self.organization,
             project=project or self.project,
             webhook_secret=webhook_secret or self.webhook_secret,
diff --git a/src/openai/lib/azure.py b/src/openai/lib/azure.py
index a994e4256c..ad64707261 100644
--- a/src/openai/lib/azure.py
+++ b/src/openai/lib/azure.py
@@ -94,7 +94,7 @@ def __init__(
         azure_endpoint: str,
         azure_deployment: str | None = None,
         api_version: str | None = None,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         azure_ad_token: str | None = None,
         azure_ad_token_provider: AzureADTokenProvider | None = None,
         organization: str | None = None,
@@ -114,7 +114,7 @@ def __init__(
         *,
         azure_deployment: str | None = None,
         api_version: str | None = None,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         azure_ad_token: str | None = None,
         azure_ad_token_provider: AzureADTokenProvider | None = None,
         organization: str | None = None,
@@ -134,7 +134,7 @@ def __init__(
         *,
         base_url: str,
         api_version: str | None = None,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         azure_ad_token: str | None = None,
         azure_ad_token_provider: AzureADTokenProvider | None = None,
         organization: str | None = None,
@@ -154,7 +154,7 @@ def __init__(
         api_version: str | None = None,
         azure_endpoint: str | None = None,
         azure_deployment: str | None = None,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         azure_ad_token: str | None = None,
         azure_ad_token_provider: AzureADTokenProvider | None = None,
         organization: str | None = None,
@@ -258,7 +258,7 @@ def __init__(
     def copy(
         self,
         *,
-        api_key: str | None = None,
+        api_key: str | Callable[[], str] | None = None,
         organization: str | None = None,
         project: str | None = None,
         webhook_secret: str | None = None,
@@ -345,7 +345,7 @@ def _configure_realtime(self, model: str, extra_query: Query) -> tuple[httpx.URL
             "api-version": self._api_version,
             "deployment": self._azure_deployment or model,
         }
-        if self.api_key != "":
+        if self.api_key and self.api_key != "":
             auth_headers = {"api-key": self.api_key}
         else:
             token = self._get_azure_ad_token()
@@ -372,7 +372,7 @@ def __init__(
         azure_endpoint: str,
         azure_deployment: str | None = None,
         api_version: str | None = None,
-        api_key: str | None = None,
+        api_key: str | Callable[[], Awaitable[str]] | None = None,
         azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, @@ -393,7 +393,7 @@ def __init__( *, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, @@ -414,7 +414,7 @@ def __init__( *, base_url: str, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, @@ -435,7 +435,7 @@ def __init__( azure_endpoint: str | None = None, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, @@ -539,7 +539,7 @@ def __init__( def copy( self, *, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, organization: str | None = None, project: str | None = None, webhook_secret: str | None = None, @@ -628,7 +628,7 @@ async def _configure_realtime(self, model: str, extra_query: Query) -> tuple[htt "api-version": self._api_version, "deployment": self._azure_deployment or model, } - if self.api_key != "": + if self.api_key and self.api_key != "": auth_headers = {"api-key": self.api_key} else: token = await self._get_azure_ad_token() diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py index 7b99c7f6c4..4fa35963b6 100644 --- a/src/openai/resources/beta/realtime/realtime.py +++ b/src/openai/resources/beta/realtime/realtime.py @@ -358,6 +358,7 @@ async def __aenter__(self) -> AsyncRealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + await self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_async_azure_client(self.__client): url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query) @@ -540,6 +541,7 @@ def __enter__(self) -> RealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_azure_client(self.__client): url, auth_headers = self.__client._configure_realtime(self.__model, extra_query) diff --git a/src/openai/resources/realtime/realtime.py b/src/openai/resources/realtime/realtime.py index ebdfce86e3..2f5adf6548 100644 --- a/src/openai/resources/realtime/realtime.py +++ b/src/openai/resources/realtime/realtime.py @@ -326,6 +326,7 @@ async def __aenter__(self) -> AsyncRealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + await self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_async_azure_client(self.__client): url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query) @@ -507,6 +508,7 @@ def __enter__(self) -> RealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + 
self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_azure_client(self.__client): url, auth_headers = self.__client._configure_realtime(self.__model, extra_query) diff --git a/tests/test_client.py b/tests/test_client.py index ccda50a7f0..e5300e55d7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -11,7 +11,7 @@ import inspect import subprocess import tracemalloc -from typing import Any, Union, cast +from typing import Any, Union, Protocol, cast from textwrap import dedent from unittest import mock from typing_extensions import Literal @@ -41,6 +41,10 @@ api_key = "My API Key" +class MockRequestCall(Protocol): + request: httpx.Request + + def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) @@ -337,7 +341,9 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + options = client._prepare_options(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request(options) + assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): @@ -939,6 +945,62 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: assert exc_info.value.response.status_code == 302 assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + def test_api_key_before_after_refresh_provider(self) -> None: + client = OpenAI(base_url=base_url, api_key=lambda: "test_bearer_token") + + assert client.api_key == "" + assert "Authorization" not in client.auth_headers + + client._refresh_api_key() + + assert client.api_key == "test_bearer_token" + assert client.auth_headers.get("Authorization") == "Bearer test_bearer_token" + + def test_api_key_before_after_refresh_str(self) -> None: + client = OpenAI(base_url=base_url, api_key="test_api_key") + + assert client.auth_headers.get("Authorization") == "Bearer test_api_key" + client._refresh_api_key() + + assert client.auth_headers.get("Authorization") == "Bearer test_api_key" + + @pytest.mark.respx() + def test_api_key_refresh_on_retry(self, respx_mock: MockRouter) -> None: + respx_mock.post(base_url + "/chat/completions").mock( + side_effect=[ + httpx.Response(500, json={"error": "server error"}), + httpx.Response(200, json={"foo": "bar"}), + ] + ) + + counter = 0 + + def token_provider() -> str: + nonlocal counter + + counter += 1 + + if counter == 1: + return "first" + + return "second" + + client = OpenAI(base_url=base_url, api_key=token_provider) + client.chat.completions.create(messages=[], model="gpt-4") + + calls = cast("list[MockRequestCall]", respx_mock.calls) + assert len(calls) == 2 + + assert calls[0].request.headers.get("Authorization") == "Bearer first" + assert calls[1].request.headers.get("Authorization") == "Bearer second" + + def test_copy_auth(self) -> None: + client = OpenAI(base_url=base_url, api_key=lambda: "test_bearer_token_1").copy( + api_key=lambda: "test_bearer_token_2" + ) + client._refresh_api_key() + assert client.auth_headers == {"Authorization": "Bearer test_bearer_token_2"} + class TestAsyncOpenAI: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @@ -1220,9 +1282,10 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-foo") == 
"stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" - def test_validate_headers(self) -> None: + async def test_validate_headers(self) -> None: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + options = await client._prepare_options(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request(options) assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): @@ -1887,3 +1950,70 @@ async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: assert exc_info.value.response.status_code == 302 assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + + @pytest.mark.asyncio + async def test_api_key_before_after_refresh_provider(self) -> None: + async def mock_api_key_provider(): + return "test_bearer_token" + + client = AsyncOpenAI(base_url=base_url, api_key=mock_api_key_provider) + + assert client.api_key == "" + assert "Authorization" not in client.auth_headers + + await client._refresh_api_key() + + assert client.api_key == "test_bearer_token" + assert client.auth_headers.get("Authorization") == "Bearer test_bearer_token" + + @pytest.mark.asyncio + async def test_api_key_before_after_refresh_str(self) -> None: + client = AsyncOpenAI(base_url=base_url, api_key="test_api_key") + + assert client.auth_headers.get("Authorization") == "Bearer test_api_key" + await client._refresh_api_key() + + assert client.auth_headers.get("Authorization") == "Bearer test_api_key" + + @pytest.mark.asyncio + @pytest.mark.respx() + async def test_bearer_token_refresh_async(self, respx_mock: MockRouter) -> None: + respx_mock.post(base_url + "/chat/completions").mock( + side_effect=[ + httpx.Response(500, json={"error": "server error"}), + httpx.Response(200, json={"foo": "bar"}), + ] + ) + + counter = 0 + + async def token_provider() -> str: + nonlocal counter + + counter += 1 + + if counter == 1: + return "first" + + return "second" + + client = AsyncOpenAI(base_url=base_url, api_key=token_provider) + await client.chat.completions.create(messages=[], model="gpt-4") + + calls = cast("list[MockRequestCall]", respx_mock.calls) + assert len(calls) == 2 + + assert calls[0].request.headers.get("Authorization") == "Bearer first" + assert calls[1].request.headers.get("Authorization") == "Bearer second" + + @pytest.mark.asyncio + async def test_copy_auth(self) -> None: + async def token_provider_1() -> str: + return "test_bearer_token_1" + + async def token_provider_2() -> str: + return "test_bearer_token_2" + + client = AsyncOpenAI(base_url=base_url, api_key=token_provider_1).copy(api_key=token_provider_2) + await client._refresh_api_key() + assert client.auth_headers == {"Authorization": "Bearer test_bearer_token_2"} From 6645d9317a240982928b92c2f4af0381db6edc09 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 22:05:40 +0000 Subject: [PATCH 2/3] feat: improve future compat with pydantic v3 --- src/openai/_base_client.py | 6 +- src/openai/_compat.py | 108 +++++++++---------- src/openai/_models.py | 82 +++++++------- src/openai/_utils/__init__.py | 10 +- src/openai/_utils/_compat.py | 45 ++++++++ src/openai/_utils/_datetime_parse.py | 136 ++++++++++++++++++++++++ src/openai/_utils/_transform.py | 6 +- src/openai/_utils/_typing.py | 2 +- src/openai/_utils/_utils.py | 1 - 
 src/openai/_base_client.py              |   6 +-
 src/openai/_compat.py                   | 108 +++++++-------
 src/openai/_models.py                   |  82 +++++------
 src/openai/_utils/__init__.py           |  10 +-
 src/openai/_utils/_compat.py            |  45 ++++++
 src/openai/_utils/_datetime_parse.py    | 136 ++++++++++++++++++
 src/openai/_utils/_transform.py         |   6 +-
 src/openai/_utils/_typing.py            |   2 +-
 src/openai/_utils/_utils.py             |   1 -
 src/openai/cli/_cli.py                  |  12 +--
 src/openai/cli/_models.py               |   8 +-
 src/openai/lib/_parsing/_completions.py |   4 +-
 src/openai/lib/_parsing/_responses.py   |   4 +-
 src/openai/lib/_pydantic.py             |   4 +-
 tests/lib/chat/test_completions.py      |   6 +-
 tests/lib/test_pydantic.py              |   8 +-
 tests/test_models.py                    |  48 ++++-----
 tests/test_transform.py                 |  16 +--
 tests/test_utils/test_datetime_parse.py | 110 +++++++++++++++
 tests/utils.py                          |   8 +-
 20 files changed, 462 insertions(+), 162 deletions(-)
 create mode 100644 src/openai/_utils/_compat.py
 create mode 100644 src/openai/_utils/_datetime_parse.py
 create mode 100644 tests/test_utils/test_datetime_parse.py

diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index f71e00f51f..d5f1ab0903 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -59,7 +59,7 @@
     ModelBuilderProtocol,
 )
 from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
-from ._compat import PYDANTIC_V2, model_copy, model_dump
+from ._compat import PYDANTIC_V1, model_copy, model_dump
 from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
 from ._response import (
     APIResponse,
@@ -234,7 +234,7 @@ def _set_private_attributes(
         model: Type[_T],
         options: FinalRequestOptions,
     ) -> None:
-        if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+        if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
             self.__pydantic_private__ = {}

         self._model = model
@@ -322,7 +322,7 @@ def _set_private_attributes(
         client: AsyncAPIClient,
         options: FinalRequestOptions,
     ) -> None:
-        if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+        if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
             self.__pydantic_private__ = {}

         self._model = model
diff --git a/src/openai/_compat.py b/src/openai/_compat.py
index 87fc370765..73a1f3ea93 100644
--- a/src/openai/_compat.py
+++ b/src/openai/_compat.py
@@ -12,14 +12,13 @@
 _T = TypeVar("_T")
 _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)

-# --------------- Pydantic v2 compatibility ---------------
+# --------------- Pydantic v2, v3 compatibility ---------------

 # Pyright incorrectly reports some of our functions as overriding a method when they don't
 # pyright: reportIncompatibleMethodOverride=false

-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+PYDANTIC_V1 = pydantic.VERSION.startswith("1.")

-# v1 re-exports
 if TYPE_CHECKING:

     def parse_date(value: date | StrBytesIntFloat) -> date:  # noqa: ARG001
         ...
@@ -44,90 +43,92 @@ def is_typeddict(type_: type[Any]) -> bool:  # noqa: ARG001
         ...
else: - if PYDANTIC_V2: - from pydantic.v1.typing import ( + # v1 re-exports + if PYDANTIC_V1: + from pydantic.typing import ( get_args as get_args, is_union as is_union, get_origin as get_origin, is_typeddict as is_typeddict, is_literal_type as is_literal_type, ) - from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime else: - from pydantic.typing import ( + from ._utils import ( get_args as get_args, is_union as is_union, get_origin as get_origin, + parse_date as parse_date, is_typeddict as is_typeddict, + parse_datetime as parse_datetime, is_literal_type as is_literal_type, ) - from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # refactored config if TYPE_CHECKING: from pydantic import ConfigDict as ConfigDict else: - if PYDANTIC_V2: - from pydantic import ConfigDict - else: + if PYDANTIC_V1: # TODO: provide an error message here? ConfigDict = None + else: + from pydantic import ConfigDict as ConfigDict # renamed methods / properties def parse_obj(model: type[_ModelT], value: object) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(value) - else: + if PYDANTIC_V1: return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + else: + return model.model_validate(value) def field_is_required(field: FieldInfo) -> bool: - if PYDANTIC_V2: - return field.is_required() - return field.required # type: ignore + if PYDANTIC_V1: + return field.required # type: ignore + return field.is_required() def field_get_default(field: FieldInfo) -> Any: value = field.get_default() - if PYDANTIC_V2: - from pydantic_core import PydanticUndefined - - if value == PydanticUndefined: - return None + if PYDANTIC_V1: return value + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None return value def field_outer_type(field: FieldInfo) -> Any: - if PYDANTIC_V2: - return field.annotation - return field.outer_type_ # type: ignore + if PYDANTIC_V1: + return field.outer_type_ # type: ignore + return field.annotation def get_model_config(model: type[pydantic.BaseModel]) -> Any: - if PYDANTIC_V2: - return model.model_config - return model.__config__ # type: ignore + if PYDANTIC_V1: + return model.__config__ # type: ignore + return model.model_config def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: - if PYDANTIC_V2: - return model.model_fields - return model.__fields__ # type: ignore + if PYDANTIC_V1: + return model.__fields__ # type: ignore + return model.model_fields def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: - if PYDANTIC_V2: - return model.model_copy(deep=deep) - return model.copy(deep=deep) # type: ignore + if PYDANTIC_V1: + return model.copy(deep=deep) # type: ignore + return model.model_copy(deep=deep) def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: - if PYDANTIC_V2: - return model.model_dump_json(indent=indent) - return model.json(indent=indent) # type: ignore + if PYDANTIC_V1: + return model.json(indent=indent) # type: ignore + return model.model_dump_json(indent=indent) def model_dump( @@ -139,14 +140,14 @@ def model_dump( warnings: bool = True, mode: Literal["json", "python"] = "python", ) -> dict[str, Any]: - if PYDANTIC_V2 or hasattr(model, "model_dump"): + if (not PYDANTIC_V1) or hasattr(model, "model_dump"): return model.model_dump( mode=mode, 
exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, # warnings are not supported in Pydantic v1 - warnings=warnings if PYDANTIC_V2 else True, + warnings=True if PYDANTIC_V1 else warnings, ) return cast( "dict[str, Any]", @@ -159,21 +160,21 @@ def model_dump( def model_parse(model: type[_ModelT], data: Any) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(data) - return model.parse_obj(data) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + return model.model_validate(data) def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate_json(data) - return model.parse_raw(data) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.parse_raw(data) # pyright: ignore[reportDeprecated] + return model.model_validate_json(data) def model_json_schema(model: type[_ModelT]) -> dict[str, Any]: - if PYDANTIC_V2: - return model.model_json_schema() - return model.schema() # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.schema() # pyright: ignore[reportDeprecated] + return model.model_json_schema() # generic models @@ -182,17 +183,16 @@ def model_json_schema(model: type[_ModelT]) -> dict[str, Any]: class GenericModel(pydantic.BaseModel): ... else: - if PYDANTIC_V2: + if PYDANTIC_V1: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... + else: # there no longer needs to be a distinction in v2 but # we still have to create our own subclass to avoid # inconsistent MRO ordering errors class GenericModel(pydantic.BaseModel): ... - else: - import pydantic.generics - - class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... 
- # cached properties if TYPE_CHECKING: diff --git a/src/openai/_models.py b/src/openai/_models.py index 50eb0af751..8ee8612d1e 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -51,7 +51,7 @@ strip_annotated_type, ) from ._compat import ( - PYDANTIC_V2, + PYDANTIC_V1, ConfigDict, GenericModel as BaseGenericModel, get_args, @@ -84,11 +84,7 @@ class _ConfigProtocol(Protocol): class BaseModel(pydantic.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) - ) - else: + if PYDANTIC_V1: @property @override @@ -103,6 +99,10 @@ class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] def __repr_args__(self) -> ReprArgs: # we don't want these attributes to be included when something like `rich.print` is used return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}] + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) if TYPE_CHECKING: _request_id: Optional[str] = None @@ -240,25 +240,25 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] if key not in model_fields: parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value - if PYDANTIC_V2: - _extra[key] = parsed - else: + if PYDANTIC_V1: _fields_set.add(key) fields_values[key] = parsed + else: + _extra[key] = parsed object.__setattr__(m, "__dict__", fields_values) - if PYDANTIC_V2: - # these properties are copied from Pydantic's `model_construct()` method - object.__setattr__(m, "__pydantic_private__", None) - object.__setattr__(m, "__pydantic_extra__", _extra) - object.__setattr__(m, "__pydantic_fields_set__", _fields_set) - else: + if PYDANTIC_V1: # init_private_attributes() does not exist in v2 m._init_private_attributes() # type: ignore # copied from Pydantic v1's `construct()` method object.__setattr__(m, "__fields_set__", _fields_set) + else: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) + object.__setattr__(m, "__pydantic_extra__", _extra) + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) return m @@ -268,7 +268,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] # although not in practice model_construct = construct - if not PYDANTIC_V2: + if PYDANTIC_V1: # we define aliases for some of the new pydantic v2 methods so # that we can just document these methods without having to specify # a specific pydantic version as some users may not know which @@ -388,10 +388,10 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if value is None: return field_get_default(field) - if PYDANTIC_V2: - type_ = field.annotation - else: + if PYDANTIC_V1: type_ = cast(type, field.outer_type_) # type: ignore + else: + type_ = field.annotation # type: ignore if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") @@ -400,7 +400,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: - if not PYDANTIC_V2: + if PYDANTIC_V1: # TODO return None @@ -653,30 +653,30 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, for variant in get_args(union): variant = strip_annotated_type(variant) if is_basemodel_type(variant): - if 
PYDANTIC_V2: - field = _extract_field_schema_pv2(variant, discriminator_field_name) - if not field: + if PYDANTIC_V1: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field.get("serialization_alias") - - field_schema = field["schema"] + discriminator_alias = field_info.alias - if field_schema["type"] == "literal": - for entry in cast("LiteralSchema", field_schema)["expected"]: + if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): + for entry in get_args(annotation): if isinstance(entry, str): mapping[entry] = variant else: - field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - if not field_info: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field_info.alias + discriminator_alias = field.get("serialization_alias") - if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): - for entry in get_args(annotation): + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: if isinstance(entry, str): mapping[entry] = variant @@ -735,7 +735,7 @@ def add_request_id(obj: BaseModel, request_id: str | None) -> None: # in Pydantic v1, using setattr like we do above causes the attribute # to be included when serializing the model which we don't want in this # case so we need to explicitly exclude it - if not PYDANTIC_V2: + if PYDANTIC_V1: try: exclude_fields = obj.__exclude_fields__ # type: ignore except AttributeError: @@ -754,7 +754,7 @@ class GenericModel(BaseGenericModel, BaseModel): pass -if PYDANTIC_V2: +if not PYDANTIC_V1: from pydantic import TypeAdapter as _TypeAdapter _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter)) @@ -822,12 +822,12 @@ class FinalRequestOptions(pydantic.BaseModel): json_data: Union[Body, None] = None extra_json: Union[AnyMapping, None] = None - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) def get_max_retries(self, max_retries: int) -> int: if isinstance(self.max_retries, NotGiven): @@ -860,9 +860,9 @@ def construct( # type: ignore key: strip_not_given(value) for key, value in values.items() } - if PYDANTIC_V2: - return super().model_construct(_fields_set, **kwargs) - return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + return super().model_construct(_fields_set, **kwargs) if not TYPE_CHECKING: # type checkers incorrectly complain about this assignment diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 6471aa4c0d..963c83b6d4 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -11,7 +11,6 @@ lru_cache as lru_cache, is_mapping as 
is_mapping,
     is_tuple_t as is_tuple_t,
-    parse_date as parse_date,
     is_iterable as is_iterable,
     is_sequence as is_sequence,
     coerce_float as coerce_float,
@@ -24,7 +23,6 @@
     coerce_boolean as coerce_boolean,
     coerce_integer as coerce_integer,
     file_from_path as file_from_path,
-    parse_datetime as parse_datetime,
     is_azure_client as is_azure_client,
     strip_not_given as strip_not_given,
     deepcopy_minimal as deepcopy_minimal,
@@ -35,6 +33,13 @@
     maybe_coerce_integer as maybe_coerce_integer,
     is_async_azure_client as is_async_azure_client,
 )
+from ._compat import (
+    get_args as get_args,
+    is_union as is_union,
+    get_origin as get_origin,
+    is_typeddict as is_typeddict,
+    is_literal_type as is_literal_type,
+)
 from ._typing import (
     is_list_type as is_list_type,
     is_union_type as is_union_type,
@@ -59,3 +64,4 @@
     function_has_argument as function_has_argument,
     assert_signatures_in_sync as assert_signatures_in_sync,
 )
+from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
diff --git a/src/openai/_utils/_compat.py b/src/openai/_utils/_compat.py
new file mode 100644
index 0000000000..dd703233c5
--- /dev/null
+++ b/src/openai/_utils/_compat.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+import typing_extensions
+from typing import Any, Type, Union, Literal, Optional
+from datetime import date, datetime
+from typing_extensions import get_args as _get_args, get_origin as _get_origin
+
+from .._types import StrBytesIntFloat
+from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
+
+_LITERAL_TYPES = {Literal, typing_extensions.Literal}
+
+
+def get_args(tp: type[Any]) -> tuple[Any, ...]:
+    return _get_args(tp)
+
+
+def get_origin(tp: type[Any]) -> type[Any] | None:
+    return _get_origin(tp)
+
+
+def is_union(tp: Optional[Type[Any]]) -> bool:
+    if sys.version_info < (3, 10):
+        return tp is Union  # type: ignore[comparison-overlap]
+    else:
+        import types
+
+        return tp is Union or tp is types.UnionType
+
+
+def is_typeddict(tp: Type[Any]) -> bool:
+    return typing_extensions.is_typeddict(tp)
+
+
+def is_literal_type(tp: Type[Any]) -> bool:
+    return get_origin(tp) in _LITERAL_TYPES
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+    return _parse_date(value)
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+    return _parse_datetime(value)
diff --git a/src/openai/_utils/_datetime_parse.py b/src/openai/_utils/_datetime_parse.py
new file mode 100644
index 0000000000..7cb9d9e668
--- /dev/null
+++ b/src/openai/_utils/_datetime_parse.py
@@ -0,0 +1,136 @@
+"""
+This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
+without the Pydantic v1 specific errors.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Dict, Union, Optional
+from datetime import date, datetime, timezone, timedelta
+
+from .._types import StrBytesIntFloat
+
+date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
+time_expr = (
+    r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
+    r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
+    r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
+)
+
+date_re = re.compile(f"{date_expr}$")
+datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
+
+
+EPOCH = datetime(1970, 1, 1)
+# if greater than this, the number is in ms, if less than or equal it's in seconds
+# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
+MS_WATERSHED = int(2e10)
+# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
+MAX_NUMBER = int(3e20)
+
+
+def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
+    if isinstance(value, (int, float)):
+        return value
+    try:
+        return float(value)
+    except ValueError:
+        return None
+    except TypeError:
+        raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
+
+
+def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
+    if seconds > MAX_NUMBER:
+        return datetime.max
+    elif seconds < -MAX_NUMBER:
+        return datetime.min
+
+    while abs(seconds) > MS_WATERSHED:
+        seconds /= 1000
+    dt = EPOCH + timedelta(seconds=seconds)
+    return dt.replace(tzinfo=timezone.utc)
+
+
+def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
+    if value == "Z":
+        return timezone.utc
+    elif value is not None:
+        offset_mins = int(value[-2:]) if len(value) > 3 else 0
+        offset = 60 * int(value[1:3]) + offset_mins
+        if value[0] == "-":
+            offset = -offset
+        return timezone(timedelta(minutes=offset))
+    else:
+        return None
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+    """
+    Parse a datetime/int/float/string and return a datetime.datetime.
+
+    This function supports time zone offsets. When the input contains one,
+    the output uses a timezone with a fixed offset from UTC.
+
+    Raise ValueError if the input is well formatted but not a valid datetime.
+    Raise ValueError if the input isn't well formatted.
+    """
+    if isinstance(value, datetime):
+        return value
+
+    number = _get_numeric(value, "datetime")
+    if number is not None:
+        return _from_unix_seconds(number)
+
+    if isinstance(value, bytes):
+        value = value.decode()
+
+    assert not isinstance(value, (float, int))
+
+    match = datetime_re.match(value)
+    if match is None:
+        raise ValueError("invalid datetime format")
+
+    kw = match.groupdict()
+    if kw["microsecond"]:
+        kw["microsecond"] = kw["microsecond"].ljust(6, "0")
+
+    tzinfo = _parse_timezone(kw.pop("tzinfo"))
+    kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
+    kw_["tzinfo"] = tzinfo
+
+    return datetime(**kw_)  # type: ignore
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+    """
+    Parse a date/int/float/string and return a datetime.date.
+
+    Raise ValueError if the input is well formatted but not a valid date.
+    Raise ValueError if the input isn't well formatted.
+ """ + if isinstance(value, date): + if isinstance(value, datetime): + return value.date() + else: + return value + + number = _get_numeric(value, "date") + if number is not None: + return _from_unix_seconds(number).date() + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + match = date_re.match(value) + if match is None: + raise ValueError("invalid date format") + + kw = {k: int(v) for k, v in match.groupdict().items()} + + try: + return date(**kw) + except ValueError: + raise ValueError("invalid date format") from None diff --git a/src/openai/_utils/_transform.py b/src/openai/_utils/_transform.py index f5c41c09c4..bc262ea339 100644 --- a/src/openai/_utils/_transform.py +++ b/src/openai/_utils/_transform.py @@ -19,6 +19,7 @@ is_sequence, ) from .._files import is_base64_file_input +from ._compat import get_origin, is_typeddict from ._typing import ( is_list_type, is_union_type, @@ -29,7 +30,6 @@ is_annotated_type, strip_annotated_type, ) -from .._compat import get_origin, model_dump, is_typeddict _T = TypeVar("_T") @@ -169,6 +169,8 @@ def _transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation @@ -333,6 +335,8 @@ async def _async_transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation diff --git a/src/openai/_utils/_typing.py b/src/openai/_utils/_typing.py index 845cd6b287..193109f3ad 100644 --- a/src/openai/_utils/_typing.py +++ b/src/openai/_utils/_typing.py @@ -15,7 +15,7 @@ from ._utils import lru_cache from .._types import InheritsGeneric -from .._compat import is_union as _is_union +from ._compat import is_union as _is_union def is_annotated_type(typ: type) -> bool: diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py index 1e7d013b51..4a23c96c0a 100644 --- a/src/openai/_utils/_utils.py +++ b/src/openai/_utils/_utils.py @@ -23,7 +23,6 @@ import sniffio from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike -from .._compat import parse_date as parse_date, parse_datetime as parse_datetime _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) diff --git a/src/openai/cli/_cli.py b/src/openai/cli/_cli.py index fd165f48ab..d31196da50 100644 --- a/src/openai/cli/_cli.py +++ b/src/openai/cli/_cli.py @@ -16,7 +16,7 @@ from ._api import register_commands from ._utils import can_use_http2 from ._errors import CLIError, display_error -from .._compat import PYDANTIC_V2, ConfigDict, model_parse +from .._compat import PYDANTIC_V1, ConfigDict, model_parse from .._models import BaseModel from .._exceptions import APIError @@ -28,14 +28,14 @@ class Arguments(BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="ignore", - ) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="ignore", + ) verbosity: int version: Optional[str] = None diff --git a/src/openai/cli/_models.py b/src/openai/cli/_models.py index 5583db2609..a88608961b 100644 --- a/src/openai/cli/_models.py +++ b/src/openai/cli/_models.py @@ -4,14 +4,14 @@ import pydantic from .. 
import _models -from .._compat import PYDANTIC_V2, ConfigDict +from .._compat import PYDANTIC_V1, ConfigDict class BaseModel(_models.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py index fc0bd05e4d..4b8b78b70a 100644 --- a/src/openai/lib/_parsing/_completions.py +++ b/src/openai/lib/_parsing/_completions.py @@ -10,7 +10,7 @@ from .._tools import PydanticFunctionTool from ..._types import NOT_GIVEN, NotGiven from ..._utils import is_dict, is_given -from ..._compat import PYDANTIC_V2, model_parse_json +from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked from .._pydantic import is_basemodel_type, to_strict_json_schema, is_dataclass_like_type from ...types.chat import ( @@ -262,7 +262,7 @@ def _parse_content(response_format: type[ResponseFormatT], content: str) -> Resp return cast(ResponseFormatT, model_parse_json(response_format, content)) if is_dataclass_like_type(response_format): - if not PYDANTIC_V2: + if PYDANTIC_V1: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {response_format}") return pydantic.TypeAdapter(response_format).validate_json(content) diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py index 2a30ac836c..b6ebde0e8e 100644 --- a/src/openai/lib/_parsing/_responses.py +++ b/src/openai/lib/_parsing/_responses.py @@ -9,7 +9,7 @@ from .._tools import ResponsesPydanticFunctionTool from ..._types import NotGiven from ..._utils import is_given -from ..._compat import PYDANTIC_V2, model_parse_json +from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked from .._pydantic import is_basemodel_type, is_dataclass_like_type from ._completions import solve_response_format_t, type_to_response_format_param @@ -138,7 +138,7 @@ def parse_text(text: str, text_format: type[TextFormatT] | NotGiven) -> TextForm return cast(TextFormatT, model_parse_json(text_format, text)) if is_dataclass_like_type(text_format): - if not PYDANTIC_V2: + if PYDANTIC_V1: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {text_format}") return pydantic.TypeAdapter(text_format).validate_json(text) diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py index c2d73e5fc6..3cfe224cb1 100644 --- a/src/openai/lib/_pydantic.py +++ b/src/openai/lib/_pydantic.py @@ -8,7 +8,7 @@ from .._types import NOT_GIVEN from .._utils import is_dict as _is_dict, is_list -from .._compat import PYDANTIC_V2, model_json_schema +from .._compat import PYDANTIC_V1, model_json_schema _T = TypeVar("_T") @@ -16,7 +16,7 @@ def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]: if inspect.isclass(model) and is_basemodel_type(model): schema = model_json_schema(model) - elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter): + elif (not PYDANTIC_V1) and isinstance(model, pydantic.TypeAdapter): schema = model.json_schema() else: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}") diff --git a/tests/lib/chat/test_completions.py 
b/tests/lib/chat/test_completions.py index f69bc09ca3..afad5a1391 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -12,7 +12,7 @@ import openai from openai import OpenAI, AsyncOpenAI from openai._utils import assert_signatures_in_sync -from openai._compat import PYDANTIC_V2 +from openai._compat import PYDANTIC_V1 from ..utils import print_obj from ...conftest import base_url @@ -245,7 +245,7 @@ class ColorDetection(BaseModel): color: Color hex_color_code: str = Field(description="The hex color code of the detected color") - if not PYDANTIC_V2: + if PYDANTIC_V1: ColorDetection.update_forward_refs(**locals()) # type: ignore completion = make_snapshot_request( @@ -368,7 +368,7 @@ class Location(BaseModel): @pytest.mark.respx(base_url=base_url) -@pytest.mark.skipif(not PYDANTIC_V2, reason="dataclasses only supported in v2") +@pytest.mark.skipif(PYDANTIC_V1, reason="dataclasses only supported in v2") def test_parse_pydantic_dataclass(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: from pydantic.dataclasses import dataclass diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py index 7e128b70c0..754a15151c 100644 --- a/tests/lib/test_pydantic.py +++ b/tests/lib/test_pydantic.py @@ -6,14 +6,14 @@ from inline_snapshot import snapshot import openai -from openai._compat import PYDANTIC_V2 +from openai._compat import PYDANTIC_V1 from openai.lib._pydantic import to_strict_json_schema from .schema_types.query import Query def test_most_types() -> None: - if PYDANTIC_V2: + if not PYDANTIC_V1: assert openai.pydantic_function_tool(Query)["function"] == snapshot( { "name": "Query", @@ -181,7 +181,7 @@ class ColorDetection(BaseModel): def test_enums() -> None: - if PYDANTIC_V2: + if not PYDANTIC_V1: assert openai.pydantic_function_tool(ColorDetection)["function"] == snapshot( { "name": "ColorDetection", @@ -253,7 +253,7 @@ class Universe(BaseModel): def test_nested_inline_ref_expansion() -> None: - if PYDANTIC_V2: + if not PYDANTIC_V1: assert to_strict_json_schema(Universe) == snapshot( { "title": "Universe", diff --git a/tests/test_models.py b/tests/test_models.py index 54a3a32048..410ec3bf4e 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -8,7 +8,7 @@ from pydantic import Field from openai._utils import PropertyInfo -from openai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from openai._compat import PYDANTIC_V1, parse_obj, model_dump, model_json from openai._models import BaseModel, construct_type @@ -294,12 +294,12 @@ class Model(BaseModel): assert cast(bool, m.foo) is True m = Model.construct(foo={"name": 3}) - if PYDANTIC_V2: - assert isinstance(m.foo, Submodel1) - assert m.foo.name == 3 # type: ignore - else: + if PYDANTIC_V1: assert isinstance(m.foo, Submodel2) assert m.foo.name == "3" + else: + assert isinstance(m.foo, Submodel1) + assert m.foo.name == 3 # type: ignore def test_list_of_unions() -> None: @@ -426,10 +426,10 @@ class Model(BaseModel): expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) - if PYDANTIC_V2: - expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' - else: + if PYDANTIC_V1: expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' + else: + expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' model = Model.construct(created_at="2019-12-27T18:11:19.117Z") assert model.created_at == expected @@ -531,7 +531,7 @@ class Model2(BaseModel): assert m4.to_dict(mode="python") == {"created_at": 
datetime.fromisoformat(time_str)} assert m4.to_dict(mode="json") == {"created_at": time_str} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_dict(warnings=False) @@ -556,7 +556,7 @@ class Model(BaseModel): assert m3.model_dump() == {"foo": None} assert m3.model_dump(exclude_none=True) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump(round_trip=True) @@ -580,10 +580,10 @@ class Model(BaseModel): assert json.loads(m.to_json()) == {"FOO": "hello"} assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"} - if PYDANTIC_V2: - assert m.to_json(indent=None) == '{"FOO":"hello"}' - else: + if PYDANTIC_V1: assert m.to_json(indent=None) == '{"FOO": "hello"}' + else: + assert m.to_json(indent=None) == '{"FOO":"hello"}' m2 = Model() assert json.loads(m2.to_json()) == {} @@ -595,7 +595,7 @@ class Model(BaseModel): assert json.loads(m3.to_json()) == {"FOO": None} assert json.loads(m3.to_json(exclude_none=True)) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_json(warnings=False) @@ -622,7 +622,7 @@ class Model(BaseModel): assert json.loads(m3.model_dump_json()) == {"foo": None} assert json.loads(m3.model_dump_json(exclude_none=True)) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump_json(round_trip=True) @@ -679,12 +679,12 @@ class B(BaseModel): ) assert isinstance(m, A) assert m.type == "a" - if PYDANTIC_V2: - assert m.data == 100 # type: ignore[comparison-overlap] - else: + if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_unknown_variant() -> None: @@ -768,12 +768,12 @@ class B(BaseModel): ) assert isinstance(m, A) assert m.foo_type == "a" - if PYDANTIC_V2: - assert m.data == 100 # type: ignore[comparison-overlap] - else: + if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None: @@ -833,7 +833,7 @@ class B(BaseModel): assert UnionType.__discriminator__ is discriminator -@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_type_alias_type() -> None: Alias = TypeAliasType("Alias", str) # pyright: ignore @@ -849,7 +849,7 @@ class Model(BaseModel): assert m.union == "bar" -@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_field_named_cls() -> None: class Model(BaseModel): cls: str @@ -936,7 +936,7 @@ class Type2(BaseModel): assert isinstance(model.value, InnerType2) -@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now") def test_extra_properties() -> None: class Item(BaseModel): prop: int diff --git a/tests/test_transform.py b/tests/test_transform.py index 
965f65f74f..036cfdfb06 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -15,7 +15,7 @@ parse_datetime, async_transform as _async_transform, ) -from openai._compat import PYDANTIC_V2 +from openai._compat import PYDANTIC_V1 from openai._models import BaseModel _T = TypeVar("_T") @@ -189,7 +189,7 @@ class DateModel(BaseModel): @pytest.mark.asyncio async def test_iso8601_format(use_async: bool) -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") - tz = "Z" if PYDANTIC_V2 else "+00:00" + tz = "+00:00" if PYDANTIC_V1 else "Z" assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap] @@ -297,11 +297,11 @@ async def test_pydantic_unknown_field(use_async: bool) -> None: @pytest.mark.asyncio async def test_pydantic_mismatched_types(use_async: bool) -> None: model = MyModel.construct(foo=True) - if PYDANTIC_V2: + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: with pytest.warns(UserWarning): params = await transform(model, Any, use_async) - else: - params = await transform(model, Any, use_async) assert cast(Any, params) == {"foo": True} @@ -309,11 +309,11 @@ async def test_pydantic_mismatched_types(use_async: bool) -> None: @pytest.mark.asyncio async def test_pydantic_mismatched_object_type(use_async: bool) -> None: model = MyModel.construct(foo=MyModel.construct(hello="world")) - if PYDANTIC_V2: + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: with pytest.warns(UserWarning): params = await transform(model, Any, use_async) - else: - params = await transform(model, Any, use_async) assert cast(Any, params) == {"foo": {"hello": "world"}} diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py new file mode 100644 index 0000000000..44c33a4ccb --- /dev/null +++ b/tests/test_utils/test_datetime_parse.py @@ -0,0 +1,110 @@ +""" +Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py +with modifications so it works without pydantic v1 imports. 
+""" + +from typing import Type, Union +from datetime import date, datetime, timezone, timedelta + +import pytest + +from openai._utils import parse_date, parse_datetime + + +def create_tz(minutes: int) -> timezone: + return timezone(timedelta(minutes=minutes)) + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + ("1494012444.883309", date(2017, 5, 5)), + (b"1494012444.883309", date(2017, 5, 5)), + (1_494_012_444.883_309, date(2017, 5, 5)), + ("1494012444", date(2017, 5, 5)), + (1_494_012_444, date(2017, 5, 5)), + (0, date(1970, 1, 1)), + ("2012-04-23", date(2012, 4, 23)), + (b"2012-04-23", date(2012, 4, 23)), + ("2012-4-9", date(2012, 4, 9)), + (date(2012, 4, 9), date(2012, 4, 9)), + (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)), + # Invalid inputs + ("x20120423", ValueError), + ("2012-04-56", ValueError), + (19_999_999_999, date(2603, 10, 11)), # just before watershed + (20_000_000_001, date(1970, 8, 20)), # just after watershed + (1_549_316_052, date(2019, 2, 4)), # nowish in s + (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms + (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs + (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns + ("infinity", date(9999, 12, 31)), + ("inf", date(9999, 12, 31)), + (float("inf"), date(9999, 12, 31)), + ("infinity ", date(9999, 12, 31)), + (int("1" + "0" * 100), date(9999, 12, 31)), + (1e1000, date(9999, 12, 31)), + ("-infinity", date(1, 1, 1)), + ("-inf", date(1, 1, 1)), + ("nan", ValueError), + ], +) +def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_date(value) + else: + assert parse_date(value) == result + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + # values in seconds + ("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + # values in ms + ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)), + ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)), + (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)), + ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)), + ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)), + ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))), + ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))), + ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))), + ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (datetime(2017, 5, 5), datetime(2017, 5, 5)), + (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)), + # Invalid inputs + ("x20120423091500", ValueError), + ("2012-04-56T09:15:90", ValueError), + ("2012-04-23T11:05:00-25:00", ValueError), + (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, 
tzinfo=timezone.utc)), # just before watershed + (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed + (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s + (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms + (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs + (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns + ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)), + (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)), + (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("-infinity", datetime(1, 1, 1, 0, 0)), + ("-inf", datetime(1, 1, 1, 0, 0)), + ("nan", ValueError), + ], +) +def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_datetime(value) + else: + assert parse_datetime(value) == result diff --git a/tests/utils.py b/tests/utils.py index a07052140b..e03ed1a039 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -22,7 +22,7 @@ is_annotated_type, is_type_alias_type, ) -from openai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from openai._compat import PYDANTIC_V1, field_outer_type, get_model_fields from openai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) @@ -35,12 +35,12 @@ def evaluate_forwardref(forwardref: ForwardRef, globalns: dict[str, Any]) -> typ def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool: for name, field in get_model_fields(model).items(): field_value = getattr(value, name) - if PYDANTIC_V2: - allow_none = False - else: + if PYDANTIC_V1: # in v1 nullability was structured differently # https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields allow_none = getattr(field, "allow_none", False) + else: + allow_none = False assert_matches_type( field_outer_type(field), From cd2a98c8d1130a23eb13b74c1aa399275870979d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 13:34:31 +0000 Subject: [PATCH 3/3] release: 1.106.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1e15251d64..3064ce9554 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.105.0" + ".": "1.106.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ed3bbe6ed..423ace20c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 1.106.0 (2025-09-04) + +Full Changelog: [v1.105.0...v1.106.0](https://github.com/openai/openai-python/compare/v1.105.0...v1.106.0) + +### Features + +* **client:** support callable api_key ([#2588](https://github.com/openai/openai-python/issues/2588)) ([e1bad01](https://github.com/openai/openai-python/commit/e1bad015b8a2b98bfee955a24bc931347a58efc1)) +* improve future compat with pydantic v3 
([6645d93](https://github.com/openai/openai-python/commit/6645d9317a240982928b92c2f4af0381db6edc09)) + ## 1.105.0 (2025-09-03) Full Changelog: [v1.104.2...v1.105.0](https://github.com/openai/openai-python/compare/v1.104.2...v1.105.0) diff --git a/pyproject.toml b/pyproject.toml index 587ca41e01..94d85b8442 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.105.0" +version = "1.106.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 5509cd4d8e..6a8d9a3e2d 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.105.0" # x-release-please-version +__version__ = "1.106.0" # x-release-please-version