diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 3b81c9b87e..102fa47016 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.107.3"
+  ".": "1.108.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 905a02c44a..2dd0aef46a 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 118
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d30ff992a48873c1466c49f3c01f2ec8933faebff23424748f8d056065b1bcef.yml
-openapi_spec_hash: e933ec43b46f45c348adb78840e5808d
-config_hash: bf45940f0a7805b4ec2017eecdd36893
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-380330a93b5d010391ca3b36ea193c5353b0dfdf2ddd02789ef84a84ce427e82.yml
+openapi_spec_hash: 859703234259ecdd2a3c6f4de88eb504
+config_hash: b619b45c1e7facf819f902dee8fa4f97
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9314bd48a..1e35189611 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog
 
+## 1.108.0 (2025-09-17)
+
+Full Changelog: [v1.107.3...v1.108.0](https://github.com/openai/openai-python/compare/v1.107.3...v1.108.0)
+
+### Features
+
+* **api:** type updates for conversations, reasoning_effort and results for evals ([c2ee28c](https://github.com/openai/openai-python/commit/c2ee28c1b77eed98766fbb01cf1ad2ee240f412e))
+
+
+### Chores
+
+* **internal:** update pydantic dependency ([369d10a](https://github.com/openai/openai-python/commit/369d10a40dfe744f6bfc10c99eb1f58176500120))
+
 ## 1.107.3 (2025-09-15)
 
 Full Changelog: [v1.107.2...v1.107.3](https://github.com/openai/openai-python/compare/v1.107.2...v1.107.3)
diff --git a/api.md b/api.md
index 73b8427387..6bbb47f78c 100644
--- a/api.md
+++ b/api.md
@@ -991,22 +991,17 @@ Types:
 
 ```python
 from openai.types.conversations import (
     ComputerScreenshotContent,
-    ContainerFileCitationBody,
     Conversation,
     ConversationDeleted,
     ConversationDeletedResource,
-    FileCitationBody,
-    InputFileContent,
-    InputImageContent,
-    InputTextContent,
-    LobProb,
     Message,
-    OutputTextContent,
-    RefusalContent,
     SummaryTextContent,
     TextContent,
-    TopLogProb,
-    URLCitationBody,
+    InputTextContent,
+    OutputTextContent,
+    RefusalContent,
+    InputImageContent,
+    InputFileContent,
 )
 ```
diff --git a/pyproject.toml b/pyproject.toml
index 190542e6dc..058b7cda6c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.107.3"
+version = "1.108.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/requirements-dev.lock b/requirements-dev.lock
index eaf136f7e6..0bd1c2c70f 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -108,6 +108,7 @@ multidict==6.5.0
 mypy==1.14.1
 mypy-extensions==1.0.0
     # via mypy
+nest-asyncio==1.6.0
 nodeenv==1.8.0
     # via pyright
 nox==2023.4.22
@@ -133,11 +134,11 @@ portalocker==2.10.1
 propcache==0.3.2
     # via aiohttp
     # via yarl
-pycparser==2.22
+pycparser==2.23
     # via cffi
-pydantic==2.10.3
+pydantic==2.11.9
     # via openai
-pydantic-core==2.27.1
+pydantic-core==2.33.2
     # via pydantic
 pygments==2.18.0
     # via pytest
@@ -199,6 +200,9 @@ typing-extensions==4.12.2
     # via pydantic
     # via pydantic-core
     # via pyright
+    # via typing-inspection
+typing-inspection==0.4.1
+    # via pydantic
 tzdata==2024.1
     # via pandas
 urllib3==2.2.1
diff --git a/requirements.lock b/requirements.lock
index 3b6ece87e2..a2b6845942 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -67,11 +67,11 @@ pandas-stubs==2.2.2.240807
 propcache==0.3.2
     # via aiohttp
     # via yarl
-pycparser==2.22
+pycparser==2.23
     # via cffi
-pydantic==2.10.3
+pydantic==2.11.9
     # via openai
-pydantic-core==2.27.1
+pydantic-core==2.33.2
     # via pydantic
 python-dateutil==2.9.0.post0
     # via pandas
@@ -93,7 +93,10 @@ typing-extensions==4.12.2
     # via openai
     # via pydantic
     # via pydantic-core
-tzdata==2024.1
+    # via typing-inspection
+typing-inspection==0.4.1
+    # via pydantic
+tzdata==2025.2
     # via pandas
 websockets==15.0.1
     # via openai
diff --git a/src/openai/_models.py b/src/openai/_models.py
index 8ee8612d1e..af71a91850 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -281,7 +281,7 @@ def model_dump(
         mode: Literal["json", "python"] | str = "python",
         include: IncEx | None = None,
         exclude: IncEx | None = None,
-        by_alias: bool = False,
+        by_alias: bool | None = None,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,
         exclude_none: bool = False,
@@ -289,6 +289,7 @@
         warnings: bool | Literal["none", "warn", "error"] = True,
         context: dict[str, Any] | None = None,
         serialize_as_any: bool = False,
+        fallback: Callable[[Any], Any] | None = None,
     ) -> dict[str, Any]:
         """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
 
@@ -320,10 +321,12 @@
             raise ValueError("context is only supported in Pydantic v2")
         if serialize_as_any != False:
             raise ValueError("serialize_as_any is only supported in Pydantic v2")
+        if fallback is not None:
+            raise ValueError("fallback is only supported in Pydantic v2")
         dumped = super().dict(  # pyright: ignore[reportDeprecated]
             include=include,
             exclude=exclude,
-            by_alias=by_alias,
+            by_alias=by_alias if by_alias is not None else False,
             exclude_unset=exclude_unset,
             exclude_defaults=exclude_defaults,
             exclude_none=exclude_none,
@@ -338,13 +341,14 @@ def model_dump_json(
         indent: int | None = None,
         include: IncEx | None = None,
         exclude: IncEx | None = None,
-        by_alias: bool = False,
+        by_alias: bool | None = None,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,
         exclude_none: bool = False,
         round_trip: bool = False,
         warnings: bool | Literal["none", "warn", "error"] = True,
         context: dict[str, Any] | None = None,
+        fallback: Callable[[Any], Any] | None = None,
         serialize_as_any: bool = False,
     ) -> str:
         """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
@@ -373,11 +377,13 @@
             raise ValueError("context is only supported in Pydantic v2")
         if serialize_as_any != False:
             raise ValueError("serialize_as_any is only supported in Pydantic v2")
+        if fallback is not None:
+            raise ValueError("fallback is only supported in Pydantic v2")
         return super().json(  # type: ignore[reportDeprecated]
             indent=indent,
             include=include,
             exclude=exclude,
-            by_alias=by_alias,
+            by_alias=by_alias if by_alias is not None else False,
             exclude_unset=exclude_unset,
             exclude_defaults=exclude_defaults,
             exclude_none=exclude_none,
diff --git a/src/openai/_version.py b/src/openai/_version.py
index aa7660d137..7030fe068c 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.107.3"  # x-release-please-version
+__version__ = "1.108.0"  # x-release-please-version
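The `_models.py` change above is the compatibility side of the Pydantic bump in the lockfiles: `by_alias` is widened to `bool | None` so the override can tell "not passed" apart from an explicit `False`, and Pydantic v2's `fallback` serializer hook is accepted in the signature while the v1 code path rejects it with a `ValueError`. A minimal sketch of the resulting behaviour, assuming the pinned Pydantic 2.11; the `Example` model is hypothetical, not part of the SDK:

```python
from openai import BaseModel  # the BaseModel patched in src/openai/_models.py


class Example(BaseModel):  # hypothetical model, for illustration only
    user_id: str


m = Example(user_id="u_123")

# Omitting by_alias (now defaulting to None) keeps the old behaviour;
# the Pydantic v1 shim maps None back to False explicitly.
print(m.model_dump())  # {'user_id': 'u_123'}

# fallback is called for values Pydantic cannot serialize; on Pydantic v1
# the shim raises ValueError("fallback is only supported in Pydantic v2").
print(m.model_dump_json(fallback=str))
```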
diff --git a/src/openai/types/conversations/__init__.py b/src/openai/types/conversations/__init__.py
index 538966db4f..9dec848737 100644
--- a/src/openai/types/conversations/__init__.py
+++ b/src/openai/types/conversations/__init__.py
@@ -3,15 +3,11 @@
 from __future__ import annotations
 
 from .message import Message as Message
-from .lob_prob import LobProb as LobProb
 from .conversation import Conversation as Conversation
 from .text_content import TextContent as TextContent
-from .top_log_prob import TopLogProb as TopLogProb
 from .refusal_content import RefusalContent as RefusalContent
 from .item_list_params import ItemListParams as ItemListParams
 from .conversation_item import ConversationItem as ConversationItem
-from .url_citation_body import URLCitationBody as URLCitationBody
-from .file_citation_body import FileCitationBody as FileCitationBody
 from .input_file_content import InputFileContent as InputFileContent
 from .input_text_content import InputTextContent as InputTextContent
 from .item_create_params import ItemCreateParams as ItemCreateParams
@@ -19,9 +15,13 @@
 from .output_text_content import OutputTextContent as OutputTextContent
 from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams
 from .summary_text_content import SummaryTextContent as SummaryTextContent
+from .refusal_content_param import RefusalContentParam as RefusalContentParam
 from .conversation_item_list import ConversationItemList as ConversationItemList
+from .input_file_content_param import InputFileContentParam as InputFileContentParam
+from .input_text_content_param import InputTextContentParam as InputTextContentParam
+from .input_image_content_param import InputImageContentParam as InputImageContentParam
+from .output_text_content_param import OutputTextContentParam as OutputTextContentParam
 from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
 from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
 from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent
-from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody
 from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource
diff --git a/src/openai/types/conversations/container_file_citation_body.py b/src/openai/types/conversations/container_file_citation_body.py
deleted file mode 100644
index ea460df2e2..0000000000
--- a/src/openai/types/conversations/container_file_citation_body.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ContainerFileCitationBody"]
-
-
-class ContainerFileCitationBody(BaseModel):
-    container_id: str
-    """The ID of the container file."""
-
-    end_index: int
-    """The index of the last character of the container file citation in the message."""
-
-    file_id: str
-    """The ID of the file."""
-
-    filename: str
-    """The filename of the container file cited."""
-
-    start_index: int
-    """The index of the first character of the container file citation in the message."""
-
-    type: Literal["container_file_citation"]
-    """The type of the container file citation. Always `container_file_citation`."""
diff --git a/src/openai/types/conversations/file_citation_body.py b/src/openai/types/conversations/file_citation_body.py
deleted file mode 100644
index ea90ae381d..0000000000
--- a/src/openai/types/conversations/file_citation_body.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileCitationBody"]
-
-
-class FileCitationBody(BaseModel):
-    file_id: str
-    """The ID of the file."""
-
-    filename: str
-    """The filename of the file cited."""
-
-    index: int
-    """The index of the file in the list of files."""
-
-    type: Literal["file_citation"]
-    """The type of the file citation. Always `file_citation`."""
diff --git a/src/openai/types/conversations/input_file_content.py b/src/openai/types/conversations/input_file_content.py
index 6aef7a89d9..ca555d85fc 100644
--- a/src/openai/types/conversations/input_file_content.py
+++ b/src/openai/types/conversations/input_file_content.py
@@ -1,22 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_file import ResponseInputFile
 
 __all__ = ["InputFileContent"]
 
-
-class InputFileContent(BaseModel):
-    file_id: Optional[str] = None
-    """The ID of the file to be sent to the model."""
-
-    type: Literal["input_file"]
-    """The type of the input item. Always `input_file`."""
-
-    file_url: Optional[str] = None
-    """The URL of the file to be sent to the model."""
-
-    filename: Optional[str] = None
-    """The name of the file to be sent to the model."""
+InputFileContent = ResponseInputFile
diff --git a/src/openai/types/conversations/input_file_content_param.py b/src/openai/types/conversations/input_file_content_param.py
new file mode 100644
index 0000000000..1ed8b8b9d1
--- /dev/null
+++ b/src/openai/types/conversations/input_file_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_file_param import ResponseInputFileParam
+
+InputFileContentParam = ResponseInputFileParam
diff --git a/src/openai/types/conversations/input_image_content.py b/src/openai/types/conversations/input_image_content.py
index f2587e0adc..4304323c3a 100644
--- a/src/openai/types/conversations/input_image_content.py
+++ b/src/openai/types/conversations/input_image_content.py
@@ -1,28 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_image import ResponseInputImage
 
 __all__ = ["InputImageContent"]
 
-
-class InputImageContent(BaseModel):
-    detail: Literal["low", "high", "auto"]
-    """The detail level of the image to be sent to the model.
-
-    One of `high`, `low`, or `auto`. Defaults to `auto`.
-    """
-
-    file_id: Optional[str] = None
-    """The ID of the file to be sent to the model."""
-
-    image_url: Optional[str] = None
-    """The URL of the image to be sent to the model.
-
-    A fully qualified URL or base64 encoded image in a data URL.
-    """
-
-    type: Literal["input_image"]
-    """The type of the input item. Always `input_image`."""
+InputImageContent = ResponseInputImage
diff --git a/src/openai/types/conversations/input_image_content_param.py b/src/openai/types/conversations/input_image_content_param.py
new file mode 100644
index 0000000000..a0ef9f545c
--- /dev/null
+++ b/src/openai/types/conversations/input_image_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_image_param import ResponseInputImageParam
+
+InputImageContentParam = ResponseInputImageParam
diff --git a/src/openai/types/conversations/input_text_content.py b/src/openai/types/conversations/input_text_content.py
index 5e2daebdc5..cab8b26cb1 100644
--- a/src/openai/types/conversations/input_text_content.py
+++ b/src/openai/types/conversations/input_text_content.py
@@ -1,15 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_text import ResponseInputText
 
 __all__ = ["InputTextContent"]
 
-
-class InputTextContent(BaseModel):
-    text: str
-    """The text input to the model."""
-
-    type: Literal["input_text"]
-    """The type of the input item. Always `input_text`."""
+InputTextContent = ResponseInputText
diff --git a/src/openai/types/conversations/input_text_content_param.py b/src/openai/types/conversations/input_text_content_param.py
new file mode 100644
index 0000000000..b1fd9a5f1c
--- /dev/null
+++ b/src/openai/types/conversations/input_text_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_text_param import ResponseInputTextParam
+
+InputTextContentParam = ResponseInputTextParam
diff --git a/src/openai/types/conversations/lob_prob.py b/src/openai/types/conversations/lob_prob.py
deleted file mode 100644
index f7dcd62a5e..0000000000
--- a/src/openai/types/conversations/lob_prob.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .top_log_prob import TopLogProb
-
-__all__ = ["LobProb"]
-
-
-class LobProb(BaseModel):
-    token: str
-
-    bytes: List[int]
-
-    logprob: float
-
-    top_logprobs: List[TopLogProb]
diff --git a/src/openai/types/conversations/message.py b/src/openai/types/conversations/message.py
index a070cf2869..95e03c5c00 100644
--- a/src/openai/types/conversations/message.py
+++ b/src/openai/types/conversations/message.py
@@ -6,26 +6,26 @@
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from .text_content import TextContent
-from .refusal_content import RefusalContent
-from .input_file_content import InputFileContent
-from .input_text_content import InputTextContent
-from .input_image_content import InputImageContent
-from .output_text_content import OutputTextContent
 from .summary_text_content import SummaryTextContent
 from .computer_screenshot_content import ComputerScreenshotContent
+from ..responses.response_input_file import ResponseInputFile
+from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_image import ResponseInputImage
+from ..responses.response_output_text import ResponseOutputText
+from ..responses.response_output_refusal import ResponseOutputRefusal
 
 __all__ = ["Message", "Content"]
 
 Content: TypeAlias = Annotated[
     Union[
-        InputTextContent,
-        OutputTextContent,
+        ResponseInputText,
+        ResponseOutputText,
         TextContent,
        SummaryTextContent,
-        RefusalContent,
-        InputImageContent,
+        ResponseOutputRefusal,
+        ResponseInputImage,
         ComputerScreenshotContent,
-        InputFileContent,
+        ResponseInputFile,
     ],
     PropertyInfo(discriminator="type"),
 ]
diff --git a/src/openai/types/conversations/output_text_content.py b/src/openai/types/conversations/output_text_content.py
index 2ffee76526..cfe9307d74 100644
--- a/src/openai/types/conversations/output_text_content.py
+++ b/src/openai/types/conversations/output_text_content.py
@@ -1,30 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from ..responses.response_output_text import ResponseOutputText
 
-from ..._utils import PropertyInfo
-from .lob_prob import LobProb
-from ..._models import BaseModel
-from .url_citation_body import URLCitationBody
-from .file_citation_body import FileCitationBody
-from .container_file_citation_body import ContainerFileCitationBody
+__all__ = ["OutputTextContent"]
 
-__all__ = ["OutputTextContent", "Annotation"]
-
-Annotation: TypeAlias = Annotated[
-    Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type")
-]
-
-
-class OutputTextContent(BaseModel):
-    annotations: List[Annotation]
-    """The annotations of the text output."""
-
-    text: str
-    """The text output from the model."""
-
-    type: Literal["output_text"]
-    """The type of the output text. Always `output_text`."""
-
-    logprobs: Optional[List[LobProb]] = None
+OutputTextContent = ResponseOutputText
diff --git a/src/openai/types/conversations/output_text_content_param.py b/src/openai/types/conversations/output_text_content_param.py
new file mode 100644
index 0000000000..dc3e2026f6
--- /dev/null
+++ b/src/openai/types/conversations/output_text_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_output_text_param import ResponseOutputTextParam
+
+OutputTextContentParam = ResponseOutputTextParam
diff --git a/src/openai/types/conversations/refusal_content.py b/src/openai/types/conversations/refusal_content.py
index 3c8bd5e35f..6206c267dc 100644
--- a/src/openai/types/conversations/refusal_content.py
+++ b/src/openai/types/conversations/refusal_content.py
@@ -1,15 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_output_refusal import ResponseOutputRefusal
 
 __all__ = ["RefusalContent"]
 
-
-class RefusalContent(BaseModel):
-    refusal: str
-    """The refusal explanation from the model."""
-
-    type: Literal["refusal"]
-    """The type of the refusal. Always `refusal`."""
+RefusalContent = ResponseOutputRefusal
diff --git a/src/openai/types/conversations/refusal_content_param.py b/src/openai/types/conversations/refusal_content_param.py
new file mode 100644
index 0000000000..9b83da5f2d
--- /dev/null
+++ b/src/openai/types/conversations/refusal_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_output_refusal_param import ResponseOutputRefusalParam
+
+RefusalContentParam = ResponseOutputRefusalParam
diff --git a/src/openai/types/conversations/top_log_prob.py b/src/openai/types/conversations/top_log_prob.py
deleted file mode 100644
index fafca756ae..0000000000
--- a/src/openai/types/conversations/top_log_prob.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-
-__all__ = ["TopLogProb"]
-
-
-class TopLogProb(BaseModel):
-    token: str
-
-    bytes: List[int]
-
-    logprob: float
diff --git a/src/openai/types/conversations/url_citation_body.py b/src/openai/types/conversations/url_citation_body.py
deleted file mode 100644
index 1becb44bc0..0000000000
--- a/src/openai/types/conversations/url_citation_body.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["URLCitationBody"]
-
-
-class URLCitationBody(BaseModel):
-    end_index: int
-    """The index of the last character of the URL citation in the message."""
-
-    start_index: int
-    """The index of the first character of the URL citation in the message."""
-
-    title: str
-    """The title of the web resource."""
-
-    type: Literal["url_citation"]
-    """The type of the URL citation. Always `url_citation`."""
-
-    url: str
-    """The URL of the web resource."""
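Taken together, the conversations changes above stop duplicating content models and re-export the shared Responses types under their old names, so both import paths now resolve to the same classes. A quick sketch of what that equivalence means in practice (the assertions follow directly from the aliasing shown above):

```python
from openai.types.conversations import InputTextContent, RefusalContent
from openai.types.responses import ResponseInputText, ResponseOutputRefusal

# The conversations names are now plain aliases, not parallel definitions.
assert InputTextContent is ResponseInputText
assert RefusalContent is ResponseOutputRefusal

# So a value of one type satisfies isinstance checks against the other.
item = InputTextContent(text="hello", type="input_text")
assert isinstance(item, ResponseInputText)
```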
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py
index edf70c8ad4..74323a735e 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -6,6 +6,7 @@
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
 from ..shared.response_format_text import ResponseFormatText
 from ..responses.easy_input_message import EasyInputMessage
 from ..responses.response_input_text import ResponseInputText
@@ -167,6 +168,15 @@ class SamplingParams(BaseModel):
     max_completion_tokens: Optional[int] = None
     """The maximum number of tokens in the generated output."""
 
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
+    """
+
     response_format: Optional[SamplingParamsResponseFormat] = None
     """An object specifying the format that the model must output.
 
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
index c14360ac80..4e9c1fdeb8 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -6,6 +6,7 @@
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
 from ..responses.easy_input_message_param import EasyInputMessageParam
 from ..shared_params.response_format_text import ResponseFormatText
 from ..responses.response_input_text_param import ResponseInputTextParam
@@ -163,6 +164,15 @@ class SamplingParams(TypedDict, total=False):
     max_completion_tokens: int
     """The maximum number of tokens in the generated output."""
 
+    reasoning_effort: Optional[ReasoningEffort]
+    """
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
+    """
+
     response_format: SamplingParamsResponseFormat
     """An object specifying the format that the model must output.
 
diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py
index 8f43494e68..d04d4ff657 100644
--- a/src/openai/types/evals/run_cancel_response.py
+++ b/src/openai/types/evals/run_cancel_response.py
@@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel):
     max_completion_tokens: Optional[int] = None
     """The maximum number of tokens in the generated output."""
 
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
+    """
+
     seed: Optional[int] = None
     """A seed value to initialize the randomness, during sampling."""
 
+ """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 35813c8901..6ff897b5de 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -252,6 +252,15 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= max_completion_tokens: int """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + """ + seed: int """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index c842a5ad2f..defa275c8c 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 5a5c2efbb3..7fe0e55ace 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index f341296875..a50520f17d 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + reasoning_effort: Optional[ReasoningEffort] = None + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
+ """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" diff --git a/src/openai/types/evals/runs/output_item_list_response.py b/src/openai/types/evals/runs/output_item_list_response.py index 72b1049f7b..f774518f3c 100644 --- a/src/openai/types/evals/runs/output_item_list_response.py +++ b/src/openai/types/evals/runs/output_item_list_response.py @@ -1,13 +1,38 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import builtins -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional from typing_extensions import Literal +from pydantic import Field as FieldInfo + from ...._models import BaseModel from ..eval_api_error import EvalAPIError -__all__ = ["OutputItemListResponse", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] +__all__ = ["OutputItemListResponse", "Result", "Sample", "SampleInput", "SampleOutput", "SampleUsage"] + + +class Result(BaseModel): + name: str + """The name of the grader.""" + + passed: bool + """Whether the grader considered the output a pass.""" + + score: float + """The numeric score produced by the grader.""" + + sample: Optional[Dict[str, object]] = None + """Optional sample or intermediate data produced by the grader.""" + + type: Optional[str] = None + """The grader type (for example, "string-check-grader").""" + + __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + if TYPE_CHECKING: + # Stub to indicate that arbitrary properties are accepted. + # To access properties that are not valid identifiers you can use `getattr`, e.g. + # `getattr(obj, '$type')` + def __getattr__(self, attr: str) -> object: ... class SampleInput(BaseModel): @@ -91,8 +116,8 @@ class OutputItemListResponse(BaseModel): object: Literal["eval.run.output_item"] """The type of the object. Always "eval.run.output_item".""" - results: List[Dict[str, builtins.object]] - """A list of results from the evaluation run.""" + results: List[Result] + """A list of grader results for this output item.""" run_id: str """The identifier of the evaluation run associated with this output item.""" diff --git a/src/openai/types/evals/runs/output_item_retrieve_response.py b/src/openai/types/evals/runs/output_item_retrieve_response.py index 63aab5565f..d66435bd4f 100644 --- a/src/openai/types/evals/runs/output_item_retrieve_response.py +++ b/src/openai/types/evals/runs/output_item_retrieve_response.py @@ -1,13 +1,38 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py
index fc221b8e41..908c6f91d3 100644
--- a/src/openai/types/graders/score_model_grader.py
+++ b/src/openai/types/graders/score_model_grader.py
@@ -4,10 +4,18 @@
 from typing_extensions import Literal, TypeAlias
 
 from ..._models import BaseModel
+from ..shared.reasoning_effort import ReasoningEffort
 from ..responses.response_input_text import ResponseInputText
 from ..responses.response_input_audio import ResponseInputAudio
 
-__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
+__all__ = [
+    "ScoreModelGrader",
+    "Input",
+    "InputContent",
+    "InputContentOutputText",
+    "InputContentInputImage",
+    "SamplingParams",
+]
 
 
 class InputContentOutputText(BaseModel):
@@ -51,6 +59,29 @@ class Input(BaseModel):
     """The type of the message input. Always `message`."""
 
 
+class SamplingParams(BaseModel):
+    max_completions_tokens: Optional[int] = None
+    """The maximum number of tokens the grader model may generate in its response."""
+
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
+    """
+
+    seed: Optional[int] = None
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: Optional[float] = None
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: Optional[float] = None
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
 class ScoreModelGrader(BaseModel):
     input: List[Input]
     """The input text. This may include template strings."""
@@ -67,5 +98,5 @@
     range: Optional[List[float]] = None
     """The range of the score. Defaults to `[0, 1]`."""
 
-    sampling_params: Optional[object] = None
+    sampling_params: Optional[SamplingParams] = None
     """The sampling parameters for the model."""
+ """ + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + class ScoreModelGrader(BaseModel): input: List[Input] """The input text. This may include template strings.""" @@ -67,5 +98,5 @@ class ScoreModelGrader(BaseModel): range: Optional[List[float]] = None """The range of the score. Defaults to `[0, 1]`.""" - sampling_params: Optional[object] = None + sampling_params: Optional[SamplingParams] = None """The sampling parameters for the model.""" diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 15100bb74b..743944e099 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -2,13 +2,21 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text_param import ResponseInputTextParam from ..responses.response_input_audio_param import ResponseInputAudioParam -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] +__all__ = [ + "ScoreModelGraderParam", + "Input", + "InputContent", + "InputContentOutputText", + "InputContentInputImage", + "SamplingParams", +] class InputContentOutputText(TypedDict, total=False): @@ -57,6 +65,29 @@ class Input(TypedDict, total=False): """The type of the message input. Always `message`.""" +class SamplingParams(TypedDict, total=False): + max_completions_tokens: Optional[int] + """The maximum number of tokens the grader model may generate in its response.""" + + reasoning_effort: Optional[ReasoningEffort] + """ + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. + """ + + seed: Optional[int] + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + class ScoreModelGraderParam(TypedDict, total=False): input: Required[Iterable[Input]] """The input text. This may include template strings.""" @@ -73,5 +104,5 @@ class ScoreModelGraderParam(TypedDict, total=False): range: Iterable[float] """The range of the score. Defaults to `[0, 1]`.""" - sampling_params: object + sampling_params: SamplingParams """The sampling parameters for the model."""