Commit 9105b8b

feat(api): new API tools
Parent: 746abf4

98 files changed (+9036 −68 lines)


.stats.yml

Lines changed: 3 additions & 3 deletions

@@ -1,4 +1,4 @@
 configured_endpoints: 99
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d51538ac955164de98b0c94a0a4718d96623fe39bf31a1d168be06c93c94e645.yml
-openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b
-config_hash: c42d37618b8628ce7e1c76437db5dd8f
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a5651cb97f86d1e2531af6aef8c5230f1ea350560fbae790ca2e481b30a6c217.yml
+openapi_spec_hash: 66a5104fd3bb43383cf919225df7a6fd
+config_hash: bb657c3fed232a56930035de3aaed936

lib/openai.rb

Lines changed: 18 additions & 0 deletions

@@ -367,6 +367,10 @@
 require_relative "openai/models/responses/response_function_tool_call_item"
 require_relative "openai/models/responses/response_function_tool_call_output_item"
 require_relative "openai/models/responses/response_function_web_search"
+require_relative "openai/models/responses/response_image_gen_call_completed_event"
+require_relative "openai/models/responses/response_image_gen_call_generating_event"
+require_relative "openai/models/responses/response_image_gen_call_in_progress_event"
+require_relative "openai/models/responses/response_image_gen_call_partial_image_event"
 require_relative "openai/models/responses/response_includable"
 require_relative "openai/models/responses/response_incomplete_event"
 require_relative "openai/models/responses/response_in_progress_event"
@@ -381,14 +385,28 @@
 require_relative "openai/models/responses/response_input_text"
 require_relative "openai/models/responses/response_item"
 require_relative "openai/models/responses/response_item_list"
+require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
+require_relative "openai/models/responses/response_mcp_call_arguments_done_event"
+require_relative "openai/models/responses/response_mcp_call_completed_event"
+require_relative "openai/models/responses/response_mcp_call_failed_event"
+require_relative "openai/models/responses/response_mcp_call_in_progress_event"
+require_relative "openai/models/responses/response_mcp_list_tools_completed_event"
+require_relative "openai/models/responses/response_mcp_list_tools_failed_event"
+require_relative "openai/models/responses/response_mcp_list_tools_in_progress_event"
 require_relative "openai/models/responses/response_output_audio"
 require_relative "openai/models/responses/response_output_item"
 require_relative "openai/models/responses/response_output_item_added_event"
 require_relative "openai/models/responses/response_output_item_done_event"
 require_relative "openai/models/responses/response_output_message"
 require_relative "openai/models/responses/response_output_refusal"
 require_relative "openai/models/responses/response_output_text"
+require_relative "openai/models/responses/response_output_text_annotation_added_event"
+require_relative "openai/models/responses/response_queued_event"
+require_relative "openai/models/responses/response_reasoning_delta_event"
+require_relative "openai/models/responses/response_reasoning_done_event"
 require_relative "openai/models/responses/response_reasoning_item"
+require_relative "openai/models/responses/response_reasoning_summary_delta_event"
+require_relative "openai/models/responses/response_reasoning_summary_done_event"
 require_relative "openai/models/responses/response_reasoning_summary_part_added_event"
 require_relative "openai/models/responses/response_reasoning_summary_part_done_event"
 require_relative "openai/models/responses/response_reasoning_summary_text_delta_event"

lib/openai/models/responses/response.rb

Lines changed: 15 additions & 6 deletions

@@ -78,7 +78,7 @@ class Response < OpenAI::Internal::Type::BaseModel
 # an `assistant` message with the content generated by the model, you might
 # consider using the `output_text` property where supported in SDKs.
 #
-# @return [Array<OpenAI::Responses::ResponseOutputMessage, OpenAI::Responses::ResponseFileSearchToolCall, OpenAI::Responses::ResponseFunctionToolCall, OpenAI::Responses::ResponseFunctionWebSearch, OpenAI::Responses::ResponseComputerToolCall, OpenAI::Responses::ResponseReasoningItem>]
+# @return [Array<OpenAI::Responses::ResponseOutputMessage, OpenAI::Responses::ResponseFileSearchToolCall, OpenAI::Responses::ResponseFunctionToolCall, OpenAI::Responses::ResponseFunctionWebSearch, OpenAI::Responses::ResponseComputerToolCall, OpenAI::Responses::ResponseReasoningItem, OpenAI::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Responses::ResponseOutputItem::McpCall, OpenAI::Responses::ResponseOutputItem::McpListTools, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest>]
 required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] }

 # @!attribute parallel_tool_calls
@@ -120,7 +120,7 @@ class Response < OpenAI::Internal::Type::BaseModel
 # the model to call your own code. Learn more about
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
 #
-# @return [Array<OpenAI::Responses::FileSearchTool, OpenAI::Responses::FunctionTool, OpenAI::Responses::ComputerTool, OpenAI::Responses::WebSearchTool>]
+# @return [Array<OpenAI::Responses::FunctionTool, OpenAI::Responses::FileSearchTool, OpenAI::Responses::ComputerTool, OpenAI::Responses::Tool::Mcp, OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, OpenAI::Responses::WebSearchTool>]
 required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

 # @!attribute top_p
@@ -133,6 +133,13 @@ class Response < OpenAI::Internal::Type::BaseModel
 # @return [Float, nil]
 required :top_p, Float, nil?: true

+# @!attribute background
+# Whether to run the model response in the background.
+# [Learn more](https://platform.openai.com/docs/guides/background).
+#
+# @return [Boolean, nil]
+optional :background, OpenAI::Internal::Type::Boolean, nil?: true
+
 # @!attribute max_output_tokens
 # An upper bound for the number of tokens that can be generated for a response,
 # including visible output tokens and
@@ -182,7 +189,7 @@ class Response < OpenAI::Internal::Type::BaseModel

 # @!attribute status
 # The status of the response generation. One of `completed`, `failed`,
-# `in_progress`, or `incomplete`.
+# `in_progress`, `cancelled`, `queued`, or `incomplete`.
 #
 # @return [Symbol, OpenAI::Responses::ResponseStatus, nil]
 optional :status, enum: -> { OpenAI::Responses::ResponseStatus }
@@ -224,7 +231,7 @@ class Response < OpenAI::Internal::Type::BaseModel
 # @return [String, nil]
 optional :user, String

-# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, max_output_tokens: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
+# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Responses::Response} for more details.
 #
@@ -242,18 +249,20 @@ class Response < OpenAI::Internal::Type::BaseModel
 #
 # @param model [String, Symbol, OpenAI::ChatModel, OpenAI::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
 #
-# @param output [Array<OpenAI::Responses::ResponseOutputMessage, OpenAI::Responses::ResponseFileSearchToolCall, OpenAI::Responses::ResponseFunctionToolCall, OpenAI::Responses::ResponseFunctionWebSearch, OpenAI::Responses::ResponseComputerToolCall, OpenAI::Responses::ResponseReasoningItem>] An array of content items generated by the model.
+# @param output [Array<OpenAI::Responses::ResponseOutputMessage, OpenAI::Responses::ResponseFileSearchToolCall, OpenAI::Responses::ResponseFunctionToolCall, OpenAI::Responses::ResponseFunctionWebSearch, OpenAI::Responses::ResponseComputerToolCall, OpenAI::Responses::ResponseReasoningItem, OpenAI::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Responses::ResponseCodeInterpreterToolCall, OpenAI::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Responses::ResponseOutputItem::McpCall, OpenAI::Responses::ResponseOutputItem::McpListTools, OpenAI::Responses::ResponseOutputItem::McpApprovalRequest>] An array of content items generated by the model.
 #
 # @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel.
 #
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
 # @param tool_choice [Symbol, OpenAI::Responses::ToolChoiceOptions, OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
 #
-# @param tools [Array<OpenAI::Responses::FileSearchTool, OpenAI::Responses::FunctionTool, OpenAI::Responses::ComputerTool, OpenAI::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+# @param tools [Array<OpenAI::Responses::FunctionTool, OpenAI::Responses::FileSearchTool, OpenAI::Responses::ComputerTool, OpenAI::Responses::Tool::Mcp, OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, OpenAI::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
 #
 # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
 #
+# @param background [Boolean, nil] Whether to run the model response in the background.
+#
 # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
 #
 # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
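The new `background` attribute and the expanded `status` enum (`queued`, `cancelled`) go together: a background response is returned immediately in a `queued` state and can be polled until it reaches a terminal status. A minimal sketch under the assumption that the corresponding `background:` request parameter exists elsewhere in this commit and that the standard `responses.create` / `responses.retrieve` methods are used; the polling loop is illustrative, not part of the diff.

# Sketch only: request methods and polling cadence are assumptions; the
# `background` field and the queued/in_progress/cancelled statuses come from
# the model definitions above.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "o3",
  input: "Summarize the history of the metric system.",
  background: true
)

# Background responses start out queued; poll until a terminal status is reached.
while %i[queued in_progress].include?(response.status)
  sleep(2)
  response = client.responses.retrieve(response.id)
end

puts response.status # => :completed, :failed, :cancelled, or :incomplete
# `output_text` is the convenience accessor mentioned in the doc comments above,
# "where supported in SDKs".
puts response.output_text if response.status == :completed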

lib/openai/models/responses/response_code_interpreter_tool_call.rb

Lines changed: 10 additions & 2 deletions

@@ -37,7 +37,13 @@ class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
 # @return [Symbol, :code_interpreter_call]
 required :type, const: :code_interpreter_call

-# @!method initialize(id:, code:, results:, status:, type: :code_interpreter_call)
+# @!attribute container_id
+# The ID of the container used to run the code.
+#
+# @return [String, nil]
+optional :container_id, String
+
+# @!method initialize(id:, code:, results:, status:, container_id: nil, type: :code_interpreter_call)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Responses::ResponseCodeInterpreterToolCall} for more details.
 #
@@ -51,9 +57,11 @@ class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
 #
 # @param status [Symbol, OpenAI::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call.
 #
+# @param container_id [String] The ID of the container used to run the code.
+#
 # @param type [Symbol, :code_interpreter_call] The type of the code interpreter tool call. Always `code_interpreter_call`.

-# The output of a code interpreter tool call that is text.
+# The output of a code interpreter tool.
 module Result
 extend OpenAI::Internal::Type::Union
