Commit caf837b

feat(api): adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5
1 parent 936b2f0 commit caf837b

76 files changed: +1293 −267 lines changed


.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml
-openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728
-config_hash: aeff9289bd7f8c8482e4d738c3c2fde1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml
+openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
+config_hash: 9a64321968e21ed72f5c0e02164ea00d

api.md

Lines changed: 17 additions & 0 deletions
@@ -6,6 +6,7 @@ from openai.types import (
     ChatModel,
     ComparisonFilter,
     CompoundFilter,
+    CustomToolInputFormat,
     ErrorObject,
     FunctionDefinition,
     FunctionParameters,
@@ -15,6 +16,8 @@ from openai.types import (
     ResponseFormatJSONObject,
     ResponseFormatJSONSchema,
     ResponseFormatText,
+    ResponseFormatTextGrammar,
+    ResponseFormatTextPython,
     ResponsesModel,
 )
 ```
@@ -46,6 +49,7 @@ Types:
 ```python
 from openai.types.chat import (
     ChatCompletion,
+    ChatCompletionAllowedToolChoice,
     ChatCompletionAssistantMessageParam,
     ChatCompletionAudio,
     ChatCompletionAudioParam,
@@ -55,15 +59,20 @@ from openai.types.chat import (
     ChatCompletionContentPartInputAudio,
     ChatCompletionContentPartRefusal,
     ChatCompletionContentPartText,
+    ChatCompletionCustomTool,
     ChatCompletionDeleted,
     ChatCompletionDeveloperMessageParam,
     ChatCompletionFunctionCallOption,
     ChatCompletionFunctionMessageParam,
+    ChatCompletionFunctionTool,
     ChatCompletionMessage,
+    ChatCompletionMessageCustomToolCall,
+    ChatCompletionMessageFunctionToolCall,
     ChatCompletionMessageParam,
     ChatCompletionMessageToolCall,
     ChatCompletionModality,
     ChatCompletionNamedToolChoice,
+    ChatCompletionNamedToolChoiceCustom,
     ChatCompletionPredictionContent,
     ChatCompletionRole,
     ChatCompletionStoreMessage,
@@ -74,6 +83,7 @@ from openai.types.chat import (
     ChatCompletionToolChoiceOption,
     ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
+    ChatCompletionAllowedTools,
     ChatCompletionReasoningEffort,
 )
 ```
@@ -719,6 +729,7 @@ Types:
 ```python
 from openai.types.responses import (
     ComputerTool,
+    CustomTool,
     EasyInputMessage,
     FileSearchTool,
     FunctionTool,
@@ -741,6 +752,10 @@ from openai.types.responses import (
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
     ResponseCreatedEvent,
+    ResponseCustomToolCall,
+    ResponseCustomToolCallInputDeltaEvent,
+    ResponseCustomToolCallInputDoneEvent,
+    ResponseCustomToolCallOutput,
     ResponseError,
     ResponseErrorEvent,
     ResponseFailedEvent,
@@ -810,6 +825,8 @@ from openai.types.responses import (
     ResponseWebSearchCallInProgressEvent,
     ResponseWebSearchCallSearchingEvent,
     Tool,
+    ToolChoiceAllowed,
+    ToolChoiceCustom,
     ToolChoiceFunction,
     ToolChoiceMcp,
     ToolChoiceOptions,
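
The new chat types back the GPT-5 custom-tools surface in Chat Completions. Below is a minimal sketch of what a request using them might look like; the payload shape (a `"custom"` tool with nested `name`/`description`, and the `input` field on the returned call) is inferred from the new type names rather than taken from this commit, and the `code_exec` tool and prompt are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Hypothetical request shape, inferred from ChatCompletionCustomTool and
# ChatCompletionMessageCustomToolCall; details may differ from the real API.
completion = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Use the tool to compute 2**10."}],
    tools=[
        {
            "type": "custom",
            "custom": {
                "name": "code_exec",  # placeholder tool name
                "description": "Executes arbitrary Python code.",
            },
        }
    ],
)

for tool_call in completion.choices[0].message.tool_calls or []:
    if tool_call.type == "custom":
        # Custom tool calls carry free-form input rather than JSON function arguments.
        print(tool_call.custom.name, tool_call.custom.input)
```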

src/openai/lib/_parsing/_completions.py

Lines changed: 49 additions & 8 deletions
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import json
+import logging
 from typing import TYPE_CHECKING, Any, Iterable, cast
 from typing_extensions import TypeVar, TypeGuard, assert_never
 
@@ -19,14 +20,15 @@
     ParsedChatCompletion,
     ChatCompletionMessage,
     ParsedFunctionToolCall,
-    ChatCompletionToolParam,
     ParsedChatCompletionMessage,
+    ChatCompletionFunctionToolParam,
     completion_create_params,
 )
 from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ...types.shared_params import FunctionDefinition
 from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
-from ...types.chat.chat_completion_message_tool_call import Function
+from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ...types.chat.chat_completion_message_function_tool_call import Function
 
 ResponseFormatT = TypeVar(
     "ResponseFormatT",
@@ -35,12 +37,36 @@
 )
 _default_response_format: None = None
 
+log: logging.Logger = logging.getLogger("openai.lib.parsing")
+
+
+def is_strict_chat_completion_tool_param(
+    tool: ChatCompletionToolParam,
+) -> TypeGuard[ChatCompletionFunctionToolParam]:
+    """Check if the given tool is a strict ChatCompletionFunctionToolParam."""
+    if not tool["type"] == "function":
+        return False
+    if tool["function"].get("strict") is not True:
+        return False
+
+    return True
+
+
+def select_strict_chat_completion_tools(
+    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
+    """Select only the strict ChatCompletionFunctionToolParams from the given tools."""
+    if not is_given(tools):
+        return NOT_GIVEN
+
+    return [t for t in tools if is_strict_chat_completion_tool_param(t)]
+
 
 def validate_input_tools(
     tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-) -> None:
+) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
     if not is_given(tools):
-        return
+        return NOT_GIVEN
 
     for tool in tools:
         if tool["type"] != "function":
@@ -54,6 +80,8 @@ def validate_input_tools(
                 f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed"
             )
 
+    return cast(Iterable[ChatCompletionFunctionToolParam], tools)
+
 
 def parse_chat_completion(
     *,
@@ -95,6 +123,14 @@ def parse_chat_completion(
                         type_=ParsedFunctionToolCall,
                     )
                 )
+            elif tool_call.type == "custom":
+                # warn user that custom tool calls are not callable here
+                log.warning(
+                    "Custom tool calls are not callable. Ignoring tool call: %s - %s",
+                    tool_call.id,
+                    tool_call.custom.name,
+                    stacklevel=2,
+                )
             elif TYPE_CHECKING:  # type: ignore[unreachable]
                 assert_never(tool_call)
             else:
@@ -129,13 +165,15 @@ def parse_chat_completion(
     )
 
 
-def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
-    return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)
+def get_input_tool_by_name(
+    *, input_tools: list[ChatCompletionToolParam], name: str
+) -> ChatCompletionFunctionToolParam | None:
+    return next((t for t in input_tools if t["type"] == "function" and t.get("function", {}).get("name") == name), None)
 
 
 def parse_function_tool_arguments(
     *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
-) -> object:
+) -> object | None:
     input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
     if not input_tool:
         return None
@@ -149,7 +187,7 @@ def parse_function_tool_arguments(
     if not input_fn.get("strict"):
         return None
 
-    return json.loads(function.arguments)
+    return json.loads(function.arguments)  # type: ignore[no-any-return]
 
 
 def maybe_parse_content(
@@ -209,6 +247,9 @@ def is_response_format_param(response_format: object) -> TypeGuard[ResponseForma
 
 
 def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+    if input_tool["type"] != "function":
+        return False
+
     input_fn = cast(object, input_tool.get("function"))
     if isinstance(input_fn, PydanticFunctionTool):
         return True
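
Taken together, the new helpers keep the auto-parsing machinery focused on strict function tools while custom tools pass through (with a warning if the model calls one). A small sketch of the filtering behaviour; note that `openai.lib._parsing` is a private module subject to change, and the tool definitions below are illustrative:

```python
from openai.lib._parsing._completions import select_strict_chat_completion_tools

tools = [
    {  # strict function tool: kept
        "type": "function",
        "function": {"name": "get_weather", "parameters": {}, "strict": True},
    },
    {  # non-strict function tool: dropped
        "type": "function",
        "function": {"name": "get_time", "parameters": {}},
    },
    {  # custom tool: dropped, it cannot be auto-parsed
        "type": "custom",
        "custom": {"name": "code_exec", "description": "Run code."},
    },
]

strict_tools = select_strict_chat_completion_tools(tools)
print([t["function"]["name"] for t in strict_tools])  # ['get_weather']
```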

src/openai/lib/_parsing/_responses.py

Lines changed: 1 addition & 0 deletions
@@ -110,6 +110,7 @@ def parse_response(
             or output.type == "local_shell_call"
             or output.type == "mcp_list_tools"
             or output.type == "exec"
+            or output.type == "custom_tool_call"
         ):
             output_list.append(output)
         elif TYPE_CHECKING:  # type: ignore
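
With `custom_tool_call` added to the pass-through list, a parsed response can include custom tool call items alongside the parsed ones. A hedged sketch of consuming such an item from a Responses API call; the top-level `name`/`input` fields follow the new `ResponseCustomToolCall` type, and the tool definition is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="Use the code_exec tool to compute 2**10.",
    tools=[
        {
            "type": "custom",
            "name": "code_exec",  # placeholder custom tool
            "description": "Executes arbitrary Python code.",
        }
    ],
)

for item in response.output:
    if item.type == "custom_tool_call":
        # Custom tool call input is free-form text chosen by the model.
        print(item.name, item.input)
```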

src/openai/lib/_tools.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 import pydantic
 
 from ._pydantic import to_strict_json_schema
-from ..types.chat import ChatCompletionToolParam
+from ..types.chat import ChatCompletionFunctionToolParam
 from ..types.shared_params import FunctionDefinition
 from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam
 
@@ -42,7 +42,7 @@ def pydantic_function_tool(
     *,
     name: str | None = None,  # inferred from class name by default
     description: str | None = None,  # inferred from class docstring by default
-) -> ChatCompletionToolParam:
+) -> ChatCompletionFunctionToolParam:
     if description is None:
         # note: we intentionally don't use `.getdoc()` to avoid
         # including pydantic's docstrings
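
The narrower return annotation does not change how the helper is used. A minimal sketch of deriving a strict function tool from a Pydantic model and passing it to `chat.completions.parse()`; the `GetWeather` model and prompt are illustrative:

```python
import pydantic

import openai
from openai import OpenAI


class GetWeather(pydantic.BaseModel):
    """Look up the current weather for a city."""

    city: str
    unit: str


client = OpenAI()

# pydantic_function_tool() now advertises ChatCompletionFunctionToolParam:
# a strict function tool built from the model's JSON schema.
tool = openai.pydantic_function_tool(GetWeather)

completion = client.chat.completions.parse(
    model="gpt-5",
    messages=[{"role": "user", "content": "What's the weather in Paris, in celsius?"}],
    tools=[tool],
)
print(completion.choices[0].message.tool_calls)
```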

src/openai/lib/streaming/chat/_completions.py

Lines changed: 2 additions & 1 deletion
@@ -37,11 +37,12 @@
     parse_function_tool_arguments,
 )
 from ...._streaming import Stream, AsyncStream
-from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion
 from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ....types.chat.chat_completion import ChoiceLogprobs
 from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
 from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 
 
 class ChatCompletionStream(Generic[ResponseFormatT]):

src/openai/resources/beta/assistants.py

Lines changed: 28 additions & 20 deletions
@@ -96,12 +96,11 @@ def create(
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -220,6 +219,12 @@ def update(
         model: Union[
             str,
             Literal[
+                "gpt-5",
+                "gpt-5-mini",
+                "gpt-5-nano",
+                "gpt-5-2025-08-07",
+                "gpt-5-mini-2025-08-07",
+                "gpt-5-nano-2025-08-07",
                 "gpt-4.1",
                 "gpt-4.1-mini",
                 "gpt-4.1-nano",
@@ -298,12 +303,11 @@ def update(
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -545,12 +549,11 @@ async def create(
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -669,6 +672,12 @@ async def update(
         model: Union[
             str,
             Literal[
+                "gpt-5",
+                "gpt-5-mini",
+                "gpt-5-nano",
+                "gpt-5-2025-08-07",
+                "gpt-5-mini-2025-08-07",
+                "gpt-5-nano-2025-08-07",
                 "gpt-4.1",
                 "gpt-4.1-mini",
                 "gpt-4.1-nano",
@@ -747,12 +756,11 @@ async def update(
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
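
In practical terms, an existing assistant can now be moved onto the GPT-5 family and tuned down to the newly supported `minimal` effort. A small sketch, assuming an existing assistant whose ID below is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.update(
    assistant_id="asst_123",  # placeholder ID
    model="gpt-5",
    reasoning_effort="minimal",  # newly supported value
)
print(assistant.model)
```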
