Skip to content

release: 1.99.8 #2552

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Aug 11, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.99.7"
".": "1.99.8"
}
21 changes: 21 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,26 @@
# Changelog

## 1.99.8 (2025-08-11)

Full Changelog: [v1.99.7...v1.99.8](https://github.com/openai/openai-python/compare/v1.99.7...v1.99.8)

### Bug Fixes

* **internal/tests:** correct snapshot update comment ([2784a7a](https://github.com/openai/openai-python/commit/2784a7a7da24ddba74b5717f07d67546864472b9))
* **types:** revert ChatCompletionMessageToolCallUnion breaking change ([ba54e03](https://github.com/openai/openai-python/commit/ba54e03bc2d21825d891685bf3bad4a9253cbeb0))


### Chores

* **internal/tests:** add inline snapshot format command ([8107db8](https://github.com/openai/openai-python/commit/8107db8ff738baa65fe4cf2f2d7f1acd29219c78))
* **internal:** fix formatting ([f03a03d](https://github.com/openai/openai-python/commit/f03a03de8c84740209d021598ff8bf56b6d3c684))
* **tests:** add responses output_text test ([971347b](https://github.com/openai/openai-python/commit/971347b3a05f79c51abd11c86b382ca73c28cefb))


### Refactors

* **tests:** share snapshot utils ([791c567](https://github.com/openai/openai-python/commit/791c567cd87fb8d587965773b1da0404c7848c68))

## 1.99.7 (2025-08-11)

Full Changelog: [v1.99.6...v1.99.7](https://github.com/openai/openai-python/compare/v1.99.6...v1.99.7)
Expand Down
5 changes: 4 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.99.7"
version = "1.99.8"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down Expand Up @@ -150,6 +150,9 @@ filterwarnings = [
"error"
]

[tool.inline-snapshot]
format-command="ruff format --stdin-filename {filename}"

[tool.pyright]
# this enables practically every flag given by pyright.
# there are a couple of flags that are still disabled by
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.99.7" # x-release-please-version
__version__ = "1.99.8" # x-release-please-version
5 changes: 4 additions & 1 deletion src/openai/types/chat/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,10 @@
from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam
from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText
from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam
from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion
from .chat_completion_message_tool_call import (
ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion,
)
from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage
from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall
from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall

__all__ = [ "Function", "ChatCompletionMessageToolCallUnion"]
__all__ = ["Function", "ChatCompletionMessageToolCallUnion"]

ChatCompletionMessageToolCallUnion: TypeAlias = Annotated[
Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall],
PropertyInfo(discriminator="type"),
]

ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageToolCallUnion
136 changes: 32 additions & 104 deletions tests/lib/chat/test_completions.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions tests/lib/chat/test_completions_streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
)
from openai.lib._parsing._completions import ResponseFormatT

from ._utils import print_obj, get_snapshot_value
from ..utils import print_obj, get_snapshot_value
from ...conftest import base_url

_T = TypeVar("_T")
Expand All @@ -39,7 +39,7 @@
#
# you can update them with
#
# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`


@pytest.mark.respx(base_url=base_url)
Expand Down
Empty file.
40 changes: 40 additions & 0 deletions tests/lib/responses/test_responses.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from __future__ import annotations

from typing_extensions import TypeVar

import pytest
from respx import MockRouter
from inline_snapshot import snapshot

from openai import OpenAI

from ...conftest import base_url
from ..snapshots import make_snapshot_request

_T = TypeVar("_T")

# all the snapshots in this file are auto-generated from the live API
#
# you can update them with
#
# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`


@pytest.mark.respx(base_url=base_url)
def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
    """Verify that Response.output_text concatenates the message text parts.

    Uses make_snapshot_request so the same test can run against the live API
    (OPENAI_LIVE=1, updating the snapshots) or replay the stored response
    body through respx.
    """
    # Full raw JSON body of the /responses call, auto-captured from the live
    # API by inline-snapshot (see the update command in the module comment).
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="What's the weather like in SF?",
        ),
        content_snapshot=snapshot(
            '{"id": "resp_689a0b2545288193953c892439b42e2800b2e36c65a1fd4b", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_689a0b2637b08193ac478e568f49e3f900b2e36c65a1fd4b", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": "I can\'t provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it\'s good to be prepared for variable weather!"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 14, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 50, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 64}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # output_text should equal the single output_text part in the payload above.
    assert response.output_text == snapshot(
        "I can't provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it's good to be prepared for variable weather!"
    )
101 changes: 101 additions & 0 deletions tests/lib/snapshots.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
from __future__ import annotations

import os
import json
from typing import Any, Callable, Awaitable
from typing_extensions import TypeVar

import httpx
from respx import MockRouter

from openai import OpenAI, AsyncOpenAI

from .utils import get_snapshot_value

_T = TypeVar("_T")


def make_snapshot_request(
    func: Callable[[OpenAI], _T],
    *,
    content_snapshot: Any,
    respx_mock: MockRouter,
    mock_client: OpenAI,
    path: str,
) -> _T:
    """Invoke *func* against either a mocked or a live ``OpenAI`` client.

    In the default (mocked) mode, the stored snapshot body is served by
    ``respx_mock`` for POSTs to *path* and *func* runs against *mock_client*.
    When the ``OPENAI_LIVE=1`` environment variable is set, the request is
    sent to the real API instead and the raw response body is asserted
    against *content_snapshot* so inline-snapshot can update it.
    """
    is_live = os.environ.get("OPENAI_LIVE") == "1"

    if not is_live:
        # Replay the previously captured body for any POST to `path`.
        respx_mock.post(path).mock(
            return_value=httpx.Response(
                200,
                content=get_snapshot_value(content_snapshot),
                headers={"content-type": "application/json"},
            )
        )
        return func(mock_client)

    def _capture_response(raw: httpx.Response) -> None:
        # Re-serialise through json so the snapshot comparison is
        # whitespace-insensitive; inline-snapshot updates it on mismatch.
        assert json.dumps(json.loads(raw.read())) == content_snapshot

    # Disable the respx transport so traffic reaches the real API.
    respx_mock.stop()

    live_client = OpenAI(
        http_client=httpx.Client(
            event_hooks={
                "response": [_capture_response],
            }
        )
    )

    result = func(live_client)

    live_client.close()

    return result


async def make_async_snapshot_request(
    func: Callable[[AsyncOpenAI], Awaitable[_T]],
    *,
    content_snapshot: Any,
    respx_mock: MockRouter,
    mock_client: AsyncOpenAI,
    path: str,
) -> _T:
    """Async counterpart of ``make_snapshot_request``.

    In the default (mocked) mode, the stored snapshot body is served by
    ``respx_mock`` for POSTs to *path* and *func* is awaited against
    *mock_client*. When the ``OPENAI_LIVE=1`` environment variable is set,
    the request is sent to the real API instead and the raw response body is
    asserted against *content_snapshot* so inline-snapshot can update it.
    """
    is_live = os.environ.get("OPENAI_LIVE") == "1"

    if not is_live:
        # Replay the previously captured body for any POST to `path`.
        respx_mock.post(path).mock(
            return_value=httpx.Response(
                200,
                content=get_snapshot_value(content_snapshot),
                headers={"content-type": "application/json"},
            )
        )
        return await func(mock_client)

    async def _capture_response(raw: httpx.Response) -> None:
        # Re-serialise through json so the snapshot comparison is
        # whitespace-insensitive; inline-snapshot updates it on mismatch.
        assert json.dumps(json.loads(await raw.aread())) == content_snapshot

    # Disable the respx transport so traffic reaches the real API.
    respx_mock.stop()

    live_client = AsyncOpenAI(
        http_client=httpx.AsyncClient(
            event_hooks={
                "response": [_capture_response],
            }
        )
    )

    result = await func(live_client)

    await live_client.close()

    return result
2 changes: 1 addition & 1 deletion tests/lib/chat/_utils.py → tests/lib/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import pytest
import pydantic

from ...utils import rich_print_str
from ..utils import rich_print_str

ReprArgs: TypeAlias = "Iterable[tuple[str | None, Any]]"

Expand Down