diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 66788158..d2d60a3d 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.23.3"
+  ".": "0.24.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 905a02c4..2dd0aef4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 118
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d30ff992a48873c1466c49f3c01f2ec8933faebff23424748f8d056065b1bcef.yml
-openapi_spec_hash: e933ec43b46f45c348adb78840e5808d
-config_hash: bf45940f0a7805b4ec2017eecdd36893
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-380330a93b5d010391ca3b36ea193c5353b0dfdf2ddd02789ef84a84ce427e82.yml
+openapi_spec_hash: 859703234259ecdd2a3c6f4de88eb504
+config_hash: b619b45c1e7facf819f902dee8fa4f97
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7d858f1c..d0576f0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
 # Changelog
 
+## 0.24.0 (2025-09-17)
+
+Full Changelog: [v0.23.3...v0.24.0](https://github.com/openai/openai-ruby/compare/v0.23.3...v0.24.0)
+
+### Features
+
+* **api:** type updates for conversations, reasoning_effort and results for evals ([ee17642](https://github.com/openai/openai-ruby/commit/ee17642d7319dacb933a41ae9f1edae2a200762f))
+* expose response headers for both streams and errors ([a158fd6](https://github.com/openai/openai-ruby/commit/a158fd66b22a5586f4a45301ff96e40f8d52fe8c))
+
 ## 0.23.3 (2025-09-15)
 
 Full Changelog: [v0.23.2...v0.23.3](https://github.com/openai/openai-ruby/compare/v0.23.2...v0.23.3)
diff --git a/Gemfile.lock b/Gemfile.lock
index 5da34d73..7c214f36 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -11,7 +11,7 @@ GIT
 PATH
   remote: .
   specs:
-    openai (0.23.3)
+    openai (0.24.0)
       connection_pool
 
 GEM
diff --git a/README.md b/README.md
index 916935e8..aeee93ae 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 
 ```ruby
-gem "openai", "~> 0.23.3"
+gem "openai", "~> 0.24.0"
 ```
diff --git a/lib/openai.rb b/lib/openai.rb
index 90c25945..a0018aa1 100644
--- a/lib/openai.rb
+++ b/lib/openai.rb
@@ -62,6 +62,11 @@
 require_relative "openai/structured_output"
 require_relative "openai/models/reasoning_effort"
 require_relative "openai/models/chat/chat_completion_message"
+require_relative "openai/models/responses/response_input_file"
+require_relative "openai/models/responses/response_input_image"
+require_relative "openai/models/responses/response_input_text"
+require_relative "openai/models/responses/response_output_text"
+require_relative "openai/models/responses/response_output_refusal"
 require_relative "openai/models/graders/score_model_grader"
 require_relative "openai/models/graders/python_grader"
 require_relative "openai/models/graders/text_similarity_grader"
@@ -248,7 +253,6 @@
 require_relative "openai/models/containers/file_retrieve_response"
 require_relative "openai/models/containers/files/content_retrieve_params"
 require_relative "openai/models/conversations/computer_screenshot_content"
-require_relative "openai/models/conversations/container_file_citation_body"
 require_relative "openai/models/conversations/conversation"
 require_relative "openai/models/conversations/conversation_create_params"
 require_relative "openai/models/conversations/conversation_deleted"
@@ -258,7 +262,6 @@
 require_relative "openai/models/conversations/conversation_item_list"
 require_relative "openai/models/conversations/conversation_retrieve_params"
 require_relative "openai/models/conversations/conversation_update_params"
-require_relative "openai/models/conversations/file_citation_body"
 require_relative "openai/models/conversations/input_file_content"
 require_relative "openai/models/conversations/input_image_content"
 require_relative "openai/models/conversations/input_text_content"
@@ -266,14 +269,11 @@
 require_relative "openai/models/conversations/item_delete_params"
 require_relative "openai/models/conversations/item_list_params"
 require_relative "openai/models/conversations/item_retrieve_params"
-require_relative "openai/models/conversations/lob_prob"
 require_relative "openai/models/conversations/message"
 require_relative "openai/models/conversations/output_text_content"
 require_relative "openai/models/conversations/refusal_content"
 require_relative "openai/models/conversations/summary_text_content"
 require_relative "openai/models/conversations/text_content"
-require_relative "openai/models/conversations/top_log_prob"
-require_relative "openai/models/conversations/url_citation_body"
 require_relative "openai/models/create_embedding_response"
 require_relative "openai/models/custom_tool_input_format"
 require_relative "openai/models/embedding"
@@ -549,12 +549,9 @@
 require_relative "openai/models/responses/response_input"
 require_relative "openai/models/responses/response_input_audio"
 require_relative "openai/models/responses/response_input_content"
-require_relative "openai/models/responses/response_input_file"
-require_relative "openai/models/responses/response_input_image"
 require_relative "openai/models/responses/response_input_item"
 require_relative "openai/models/responses/response_input_message_content_list"
 require_relative "openai/models/responses/response_input_message_item"
-require_relative "openai/models/responses/response_input_text"
 require_relative "openai/models/responses/response_item"
 require_relative "openai/models/responses/response_item_list"
 require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
@@ -570,8 +567,6 @@
 require_relative "openai/models/responses/response_output_item_added_event"
 require_relative "openai/models/responses/response_output_item_done_event"
 require_relative "openai/models/responses/response_output_message"
-require_relative "openai/models/responses/response_output_refusal"
-require_relative "openai/models/responses/response_output_text"
 require_relative "openai/models/responses/response_output_text_annotation_added_event"
 require_relative "openai/models/responses/response_prompt"
 require_relative "openai/models/responses/response_queued_event"
diff --git a/lib/openai/errors.rb b/lib/openai/errors.rb
index 3637604e..76e68f39 100644
--- a/lib/openai/errors.rb
+++ b/lib/openai/errors.rb
@@ -43,6 +43,9 @@ class APIError < OpenAI::Errors::Error
       # @return [Integer, nil]
       attr_accessor :status
 
+      # @return [Hash{String=>String}, nil]
+      attr_accessor :headers
+
       # @return [Object, nil]
       attr_accessor :body
 
@@ -59,13 +62,15 @@ class APIError < OpenAI::Errors::Error
      #
      # @param url [URI::Generic]
      # @param status [Integer, nil]
+     # @param headers [Hash{String=>String}, nil]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
-      def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil)
+      def initialize(url:, status: nil, headers: nil, body: nil, request: nil, response: nil, message: nil)
        @url = url
        @status = status
+       @headers = headers
        @body = body
        @request = request
        @response = response
@@ -98,6 +103,7 @@ class APIConnectionError < OpenAI::Errors::APIError
      #
      # @param url [URI::Generic]
      # @param status [nil]
+     # @param headers [Hash{String=>String}, nil]
      # @param body [nil]
      # @param request [nil]
      # @param response [nil]
@@ -105,6 +111,7 @@ class APIConnectionError < OpenAI::Errors::APIError
       def initialize(
         url:,
         status: nil,
+        headers: nil,
         body: nil,
         request: nil,
         response: nil,
@@ -119,6 +126,7 @@ class APITimeoutError < OpenAI::Errors::APIConnectionError
      #
      # @param url [URI::Generic]
      # @param status [nil]
+     # @param headers [Hash{String=>String}, nil]
      # @param body [nil]
      # @param request [nil]
      # @param response [nil]
@@ -126,6 +134,7 @@ class APITimeoutError < OpenAI::Errors::APIConnectionError
       def initialize(
         url:,
         status: nil,
+        headers: nil,
         body: nil,
         request: nil,
         response: nil,
@@ -140,21 +149,24 @@ class APIStatusError < OpenAI::Errors::APIError
      #
      # @param url [URI::Generic]
      # @param status [Integer]
+     # @param headers [Hash{String=>String}, nil]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      #
      # @return [self]
-      def self.for(url:, status:, body:, request:, response:, message: nil)
-        kwargs = {
-          url: url,
-          status: status,
-          body: body,
-          request: request,
-          response: response,
-          message: message
-        }
+      def self.for(url:, status:, headers:, body:, request:, response:, message: nil)
+        kwargs =
+          {
+            url: url,
+            status: status,
+            headers: headers,
+            body: body,
+            request: request,
+            response: response,
+            message: message
+          }
 
         case status
         in 400
@@ -198,11 +210,12 @@ def self.for(url:, status:, body:, request:, response:, message: nil)
      #
      # @param url [URI::Generic]
      # @param status [Integer]
+     # @param headers [Hash{String=>String}, nil]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
-      def initialize(url:, status:, body:, request:, response:, message: nil)
+      def initialize(url:, status:, headers:, body:, request:, response:, message: nil)
        message ||= OpenAI::Internal::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} }
        @code = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :code))
        @param = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :param))
@@ -210,6 +223,7 @@ def initialize(url:, status:, body:, request:, response:, message: nil)
        super(
          url: url,
          status: status,
+         headers: headers,
          body: body,
          request: request,
          response: response,
diff --git a/lib/openai/internal/conversation_cursor_page.rb b/lib/openai/internal/conversation_cursor_page.rb
index 6dddc16e..c62a4eff 100644
--- a/lib/openai/internal/conversation_cursor_page.rb
+++ b/lib/openai/internal/conversation_cursor_page.rb
@@ -63,7 +63,7 @@ def auto_paging_each(&blk)
    #
    # @param client [OpenAI::Internal::Transport::BaseClient]
    # @param req [Hash{Symbol=>Object}]
-    # @param headers [Hash{String=>String}, Net::HTTPHeader]
+    # @param headers [Hash{String=>String}]
    # @param page_data [Hash{Symbol=>Object}]
    def initialize(client:, req:, headers:, page_data:)
      super
diff --git a/lib/openai/internal/cursor_page.rb b/lib/openai/internal/cursor_page.rb
index 5f68a217..819d751e 100644
--- a/lib/openai/internal/cursor_page.rb
+++ b/lib/openai/internal/cursor_page.rb
@@ -60,7 +60,7 @@ def auto_paging_each(&blk)
    #
    # @param client [OpenAI::Internal::Transport::BaseClient]
    # @param req [Hash{Symbol=>Object}]
-    # @param headers [Hash{String=>String}, Net::HTTPHeader]
+    # @param headers [Hash{String=>String}]
    # @param page_data [Hash{Symbol=>Object}]
    def initialize(client:, req:, headers:, page_data:)
      super
diff --git a/lib/openai/internal/page.rb b/lib/openai/internal/page.rb
index 36c350a4..0d95eac7 100644
--- a/lib/openai/internal/page.rb
+++ b/lib/openai/internal/page.rb
@@ -54,7 +54,7 @@ def auto_paging_each(&blk)
    #
    # @param client [OpenAI::Internal::Transport::BaseClient]
    # @param req [Hash{Symbol=>Object}]
-    # @param headers [Hash{String=>String}, Net::HTTPHeader]
+    # @param headers [Hash{String=>String}]
    # @param page_data [Array]
    def initialize(client:, req:, headers:, page_data:)
      super
diff --git a/lib/openai/internal/stream.rb b/lib/openai/internal/stream.rb
index cd2f2a2d..7f0efbe8 100644
--- a/lib/openai/internal/stream.rb
+++ b/lib/openai/internal/stream.rb
@@ -41,6 +41,7 @@ class Stream
            err = OpenAI::Errors::APIStatusError.for(
              url: @url,
              status: @status,
+             headers: @headers,
              body: data,
              request: nil,
              response: @response,
diff --git a/lib/openai/internal/transport/base_client.rb b/lib/openai/internal/transport/base_client.rb
index ca1b145f..db53aa1c 100644
--- a/lib/openai/internal/transport/base_client.rb
+++ b/lib/openai/internal/transport/base_client.rb
@@ -47,7 +47,7 @@ def validate!(req)
        # @api private
        #
        # @param status [Integer]
-        # @param headers [Hash{String=>String}, Net::HTTPHeader]
+        # @param headers [Hash{String=>String}]
        #
        # @return [Boolean]
        def should_retry?(status, headers:)
@@ -85,7 +85,7 @@ def should_retry?(status, headers:)
        #
        # @param status [Integer]
        #
-        # @param response_headers [Hash{String=>String}, Net::HTTPHeader]
+        # @param response_headers [Hash{String=>String}]
        #
        # @return [Hash{Symbol=>Object}]
        def follow_redirect(request, status:, response_headers:)
@@ -378,6 +378,7 @@ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
      rescue OpenAI::Errors::APIConnectionError => e
        status = e
      end
+      headers = OpenAI::Internal::Util.normalized_headers(response&.each_header&.to_h)
 
      case status
      in ..299
@@ -390,7 +391,7 @@ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
      in 300..399
        self.class.reap_connection!(status, stream: stream)
 
-        request = self.class.follow_redirect(request, status: status, response_headers: response)
+        request = self.class.follow_redirect(request, status: status, response_headers: headers)
        send_request(
          request,
          redirect_count: redirect_count + 1,
@@ -399,9 +400,9 @@ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
        )
      in OpenAI::Errors::APIConnectionError if retry_count >= max_retries
        raise status
-      in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: response)
+      in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: headers)
        decoded = Kernel.then do
-          OpenAI::Internal::Util.decode_content(response, stream: stream, suppress_error: true)
+          OpenAI::Internal::Util.decode_content(headers, stream: stream, suppress_error: true)
        ensure
          self.class.reap_connection!(status, stream: stream)
        end
@@ -409,6 +410,7 @@ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
        raise OpenAI::Errors::APIStatusError.for(
          url: url,
          status: status,
+          headers: headers,
          body: decoded,
          request: nil,
          response: response
@@ -485,19 +487,21 @@ def request(req)
          send_retry_header: send_retry_header
        )
 
-        decoded = OpenAI::Internal::Util.decode_content(response, stream: stream)
+        headers = OpenAI::Internal::Util.normalized_headers(response.each_header.to_h)
+        decoded = OpenAI::Internal::Util.decode_content(headers, stream: stream)
        case req
        in {stream: Class => st}
          st.new(
            model: model,
            url: url,
            status: status,
+            headers: headers,
            response: response,
            unwrap: unwrap,
            stream: decoded
          )
        in {page: Class => page}
-          page.new(client: self, req: req, headers: response, page_data: decoded)
+          page.new(client: self, req: req, headers: headers, page_data: decoded)
        else
          unwrapped = OpenAI::Internal::Util.dig(decoded, unwrap)
          OpenAI::Internal::Type::Converter.coerce(model, unwrapped)
diff --git a/lib/openai/internal/type/base_page.rb b/lib/openai/internal/type/base_page.rb
index 3ebca02d..402b1f11 100644
--- a/lib/openai/internal/type/base_page.rb
+++ b/lib/openai/internal/type/base_page.rb
@@ -39,7 +39,7 @@ def to_enum = super(:auto_paging_each)
      #
      # @param client [OpenAI::Internal::Transport::BaseClient]
      # @param req [Hash{Symbol=>Object}]
-      # @param headers [Hash{String=>String}, Net::HTTPHeader]
+      # @param headers [Hash{String=>String}]
      # @param page_data [Object]
      def initialize(client:, req:, headers:, page_data:)
        @client = client
diff --git a/lib/openai/internal/type/base_stream.rb b/lib/openai/internal/type/base_stream.rb
index 3ebdf248..8b6acc19 100644
--- a/lib/openai/internal/type/base_stream.rb
+++ b/lib/openai/internal/type/base_stream.rb
@@ -28,6 +28,12 @@ class << self
        def defer_closing(stream) = ->(_id) { OpenAI::Internal::Util.close_fused!(stream) }
      end
 
+      # @return [Integer]
+      attr_reader :status
+
+      # @return [Hash{String=>String}]
+      attr_reader :headers
+
      # @api public
      #
      # @return [void]
@@ -63,13 +69,15 @@ def to_enum = @iterator
      # @param model [Class, OpenAI::Internal::Type::Converter]
      # @param url [URI::Generic]
      # @param status [Integer]
+      # @param headers [Hash{String=>String}]
      # @param response [Net::HTTPResponse]
      # @param unwrap [Symbol, Integer, Array, Proc]
      # @param stream [Enumerable]
-      def initialize(model:, url:, status:, response:, unwrap:, stream:)
+      def initialize(model:, url:, status:, headers:, response:, unwrap:, stream:)
        @model = model
        @url = url
        @status = status
+        @headers = headers
        @response = response
        @unwrap = unwrap
        @stream = stream
diff --git a/lib/openai/internal/util.rb b/lib/openai/internal/util.rb
index bec08a64..bc25753e 100644
--- a/lib/openai/internal/util.rb
+++ b/lib/openai/internal/util.rb
@@ -647,7 +647,7 @@ def force_charset!(content_type, text:)
      #
      # Assumes each chunk in stream has `Encoding::BINARY`.
      #
-      # @param headers [Hash{String=>String}, Net::HTTPHeader]
+      # @param headers [Hash{String=>String}]
      # @param stream [Enumerable]
      # @param suppress_error [Boolean]
      #
diff --git a/lib/openai/models/conversations/computer_screenshot_content.rb b/lib/openai/models/conversations/computer_screenshot_content.rb
index 1b030a1c..6d5ff741 100644
--- a/lib/openai/models/conversations/computer_screenshot_content.rb
+++ b/lib/openai/models/conversations/computer_screenshot_content.rb
@@ -27,6 +27,8 @@ class ComputerScreenshotContent < OpenAI::Internal::Type::BaseModel
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Conversations::ComputerScreenshotContent} for more details.
        #
+        #   A screenshot of a computer.
+        #
        #   @param file_id [String, nil] The identifier of an uploaded file that contains the screenshot.
        #
        #   @param image_url [String, nil] The URL of the screenshot image.
diff --git a/lib/openai/models/conversations/container_file_citation_body.rb b/lib/openai/models/conversations/container_file_citation_body.rb
deleted file mode 100644
index 4c373465..00000000
--- a/lib/openai/models/conversations/container_file_citation_body.rb
+++ /dev/null
@@ -1,58 +0,0 @@
-# frozen_string_literal: true
-
-module OpenAI
-  module Models
-    module Conversations
-      class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel
-        # @!attribute container_id
-        #   The ID of the container file.
-        #
-        #   @return [String]
-        required :container_id, String
-
-        # @!attribute end_index
-        #   The index of the last character of the container file citation in the message.
-        #
-        #   @return [Integer]
-        required :end_index, Integer
-
-        # @!attribute file_id
-        #   The ID of the file.
-        #
-        #   @return [String]
-        required :file_id, String
-
-        # @!attribute filename
-        #   The filename of the container file cited.
-        #
-        #   @return [String]
-        required :filename, String
-
-        # @!attribute start_index
-        #   The index of the first character of the container file citation in the message.
-        #
-        #   @return [Integer]
-        required :start_index, Integer
-
-        # @!attribute type
-        #   The type of the container file citation. Always `container_file_citation`.
-        #
-        #   @return [Symbol, :container_file_citation]
-        required :type, const: :container_file_citation
-
-        # @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation)
-        #   @param container_id [String] The ID of the container file.
-        #
-        #   @param end_index [Integer] The index of the last character of the container file citation in the message.
-        #
-        #   @param file_id [String] The ID of the file.
-        #
-        #   @param filename [String] The filename of the container file cited.
-        #
-        #   @param start_index [Integer] The index of the first character of the container file citation in the message.
-        #
-        #   @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`.
-      end
-    end
-  end
-end
diff --git a/lib/openai/models/conversations/conversation_item.rb b/lib/openai/models/conversations/conversation_item.rb
index 3ca95f12..7f4a0f2e 100644
--- a/lib/openai/models/conversations/conversation_item.rb
+++ b/lib/openai/models/conversations/conversation_item.rb
@@ -13,6 +13,7 @@ module ConversationItem
 
        discriminator :type
 
+        # A message to or from the model.
        variant :message, -> { OpenAI::Conversations::Message }
 
        # A tool call to run a function. See the
diff --git a/lib/openai/models/conversations/file_citation_body.rb b/lib/openai/models/conversations/file_citation_body.rb
deleted file mode 100644
index 93d84a93..00000000
--- a/lib/openai/models/conversations/file_citation_body.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-# frozen_string_literal: true
-
-module OpenAI
-  module Models
-    module Conversations
-      class FileCitationBody < OpenAI::Internal::Type::BaseModel
-        # @!attribute file_id
-        #   The ID of the file.
-        #
-        #   @return [String]
-        required :file_id, String
-
-        # @!attribute filename
-        #   The filename of the file cited.
-        #
-        #   @return [String]
-        required :filename, String
-
-        # @!attribute index
-        #   The index of the file in the list of files.
-        #
-        #   @return [Integer]
-        required :index, Integer
-
-        # @!attribute type
-        #   The type of the file citation. Always `file_citation`.
-        #
-        #   @return [Symbol, :file_citation]
-        required :type, const: :file_citation
-
-        # @!method initialize(file_id:, filename:, index:, type: :file_citation)
-        #   @param file_id [String] The ID of the file.
-        #
-        #   @param filename [String] The filename of the file cited.
-        #
-        #   @param index [Integer] The index of the file in the list of files.
-        #
-        #   @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`.
-      end
-    end
-  end
-end
diff --git a/lib/openai/models/conversations/input_file_content.rb b/lib/openai/models/conversations/input_file_content.rb
index 1cb5b5fa..5f65029c 100644
--- a/lib/openai/models/conversations/input_file_content.rb
+++ b/lib/openai/models/conversations/input_file_content.rb
@@ -3,40 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class InputFileContent < OpenAI::Internal::Type::BaseModel
-        # @!attribute file_id
-        #   The ID of the file to be sent to the model.
-        #
-        #   @return [String, nil]
-        required :file_id, String, nil?: true
-
-        # @!attribute type
-        #   The type of the input item. Always `input_file`.
-        #
-        #   @return [Symbol, :input_file]
-        required :type, const: :input_file
-
-        # @!attribute file_url
-        #   The URL of the file to be sent to the model.
-        #
-        #   @return [String, nil]
-        optional :file_url, String
-
-        # @!attribute filename
-        #   The name of the file to be sent to the model.
-        #
-        #   @return [String, nil]
-        optional :filename, String
-
-        # @!method initialize(file_id:, file_url: nil, filename: nil, type: :input_file)
-        #   @param file_id [String, nil] The ID of the file to be sent to the model.
-        #
-        #   @param file_url [String] The URL of the file to be sent to the model.
-        #
-        #   @param filename [String] The name of the file to be sent to the model.
-        #
-        #   @param type [Symbol, :input_file] The type of the input item. Always `input_file`.
-      end
+      InputFileContent = OpenAI::Models::Responses::ResponseInputFile
     end
   end
 end
diff --git a/lib/openai/models/conversations/input_image_content.rb b/lib/openai/models/conversations/input_image_content.rb
index 63d1ef58..14254b78 100644
--- a/lib/openai/models/conversations/input_image_content.rb
+++ b/lib/openai/models/conversations/input_image_content.rb
@@ -3,60 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class InputImageContent < OpenAI::Internal::Type::BaseModel
-        # @!attribute detail
-        #   The detail level of the image to be sent to the model. One of `high`, `low`, or
-        #   `auto`. Defaults to `auto`.
-        #
-        #   @return [Symbol, OpenAI::Models::Conversations::InputImageContent::Detail]
-        required :detail, enum: -> { OpenAI::Conversations::InputImageContent::Detail }
-
-        # @!attribute file_id
-        #   The ID of the file to be sent to the model.
-        #
-        #   @return [String, nil]
-        required :file_id, String, nil?: true
-
-        # @!attribute image_url
-        #   The URL of the image to be sent to the model. A fully qualified URL or base64
-        #   encoded image in a data URL.
-        #
-        #   @return [String, nil]
-        required :image_url, String, nil?: true
-
-        # @!attribute type
-        #   The type of the input item. Always `input_image`.
-        #
-        #   @return [Symbol, :input_image]
-        required :type, const: :input_image
-
-        # @!method initialize(detail:, file_id:, image_url:, type: :input_image)
-        #   Some parameter documentations has been truncated, see
-        #   {OpenAI::Models::Conversations::InputImageContent} for more details.
-        #
-        #   @param detail [Symbol, OpenAI::Models::Conversations::InputImageContent::Detail] The detail level of the image to be sent to the model. One of `high`, `low`, or
-        #
-        #   @param file_id [String, nil] The ID of the file to be sent to the model.
-        #
-        #   @param image_url [String, nil] The URL of the image to be sent to the model. A fully qualified URL or base64 en
-        #
-        #   @param type [Symbol, :input_image] The type of the input item. Always `input_image`.
-
-        # The detail level of the image to be sent to the model. One of `high`, `low`, or
-        # `auto`. Defaults to `auto`.
-        #
-        # @see OpenAI::Models::Conversations::InputImageContent#detail
-        module Detail
-          extend OpenAI::Internal::Type::Enum
-
-          LOW = :low
-          HIGH = :high
-          AUTO = :auto
-
-          # @!method self.values
-          #   @return [Array]
-        end
-      end
+      InputImageContent = OpenAI::Models::Responses::ResponseInputImage
     end
   end
 end
diff --git a/lib/openai/models/conversations/input_text_content.rb b/lib/openai/models/conversations/input_text_content.rb
index 81c74843..f58d6e17 100644
--- a/lib/openai/models/conversations/input_text_content.rb
+++ b/lib/openai/models/conversations/input_text_content.rb
@@ -3,24 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class InputTextContent < OpenAI::Internal::Type::BaseModel
-        # @!attribute text
-        #   The text input to the model.
-        #
-        #   @return [String]
-        required :text, String
-
-        # @!attribute type
-        #   The type of the input item. Always `input_text`.
-        #
-        #   @return [Symbol, :input_text]
-        required :type, const: :input_text
-
-        # @!method initialize(text:, type: :input_text)
-        #   @param text [String] The text input to the model.
-        #
-        #   @param type [Symbol, :input_text] The type of the input item. Always `input_text`.
-      end
+      InputTextContent = OpenAI::Models::Responses::ResponseInputText
     end
   end
 end
diff --git a/lib/openai/models/conversations/lob_prob.rb b/lib/openai/models/conversations/lob_prob.rb
deleted file mode 100644
index 60d72ed2..00000000
--- a/lib/openai/models/conversations/lob_prob.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-# frozen_string_literal: true
-
-module OpenAI
-  module Models
-    module Conversations
-      class LobProb < OpenAI::Internal::Type::BaseModel
-        # @!attribute token
-        #
-        #   @return [String]
-        required :token, String
-
-        # @!attribute bytes
-        #
-        #   @return [Array<Integer>]
-        required :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
-
-        # @!attribute logprob
-        #
-        #   @return [Float]
-        required :logprob, Float
-
-        # @!attribute top_logprobs
-        #
-        #   @return [Array<OpenAI::Models::Conversations::TopLogProb>]
-        required :top_logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::TopLogProb] }
-
-        # @!method initialize(token:, bytes:, logprob:, top_logprobs:)
-        #   @param token [String]
-        #   @param bytes [Array<Integer>]
-        #   @param logprob [Float]
-        #   @param top_logprobs [Array<OpenAI::Models::Conversations::TopLogProb>]
-      end
-    end
-  end
-end
diff --git a/lib/openai/models/conversations/message.rb b/lib/openai/models/conversations/message.rb
index 5b620a90..389c2787 100644
--- a/lib/openai/models/conversations/message.rb
+++ b/lib/openai/models/conversations/message.rb
@@ -13,7 +13,7 @@ class Message < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        #   The content of the message
        #
-        #   @return [Array<OpenAI::Models::Conversations::InputTextContent, OpenAI::Models::Conversations::OutputTextContent, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Conversations::RefusalContent, OpenAI::Models::Conversations::InputImageContent, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Conversations::InputFileContent>]
+        #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Responses::ResponseInputFile>]
        required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::Message::Content] }
 
        # @!attribute role
@@ -40,9 +40,11 @@ class Message < OpenAI::Internal::Type::BaseModel
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Conversations::Message} for more details.
        #
+        #   A message to or from the model.
+        #
        #   @param id [String] The unique ID of the message.
        #
-        #   @param content [Array<OpenAI::Models::Conversations::InputTextContent, OpenAI::Models::Conversations::OutputTextContent, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Conversations::RefusalContent, OpenAI::Models::Conversations::InputImageContent, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Conversations::InputFileContent>] The content of the message
+        #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Responses::ResponseInputFile>] The content of the message
        #
        #   @param role [Symbol, OpenAI::Models::Conversations::Message::Role] The role of the message. One of `unknown`, `user`, `assistant`, `system`, `criti
        #
@@ -50,29 +52,38 @@ class Message < OpenAI::Internal::Type::BaseModel
        #
        #   @param type [Symbol, :message] The type of the message. Always set to `message`.
 
+        # A text input to the model.
        module Content
          extend OpenAI::Internal::Type::Union
 
          discriminator :type
 
-          variant :input_text, -> { OpenAI::Conversations::InputTextContent }
+          # A text input to the model.
+          variant :input_text, -> { OpenAI::Responses::ResponseInputText }
 
-          variant :output_text, -> { OpenAI::Conversations::OutputTextContent }
+          # A text output from the model.
+          variant :output_text, -> { OpenAI::Responses::ResponseOutputText }
 
+          # A text content.
          variant :text, -> { OpenAI::Conversations::TextContent }
 
+          # A summary text from the model.
          variant :summary_text, -> { OpenAI::Conversations::SummaryTextContent }
 
-          variant :refusal, -> { OpenAI::Conversations::RefusalContent }
+          # A refusal from the model.
+          variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
 
-          variant :input_image, -> { OpenAI::Conversations::InputImageContent }
+          # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+          variant :input_image, -> { OpenAI::Responses::ResponseInputImage }
 
+          # A screenshot of a computer.
          variant :computer_screenshot, -> { OpenAI::Conversations::ComputerScreenshotContent }
 
-          variant :input_file, -> { OpenAI::Conversations::InputFileContent }
+          # A file input to the model.
+          variant :input_file, -> { OpenAI::Responses::ResponseInputFile }
 
          # @!method self.variants
-          #   @return [Array(OpenAI::Models::Conversations::InputTextContent, OpenAI::Models::Conversations::OutputTextContent, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Conversations::RefusalContent, OpenAI::Models::Conversations::InputImageContent, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Conversations::InputFileContent)]
+          #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Responses::ResponseInputFile)]
        end
 
        # The role of the message. One of `unknown`, `user`, `assistant`, `system`,
diff --git a/lib/openai/models/conversations/output_text_content.rb b/lib/openai/models/conversations/output_text_content.rb
index 23791d5b..46d05fc9 100644
--- a/lib/openai/models/conversations/output_text_content.rb
+++ b/lib/openai/models/conversations/output_text_content.rb
@@ -3,55 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class OutputTextContent < OpenAI::Internal::Type::BaseModel
-        # @!attribute annotations
-        #   The annotations of the text output.
-        #
-        #   @return [Array<OpenAI::Models::Conversations::FileCitationBody, OpenAI::Models::Conversations::URLCitationBody, OpenAI::Models::Conversations::ContainerFileCitationBody>]
-        required :annotations,
-                 -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::OutputTextContent::Annotation] }
-
-        # @!attribute text
-        #   The text output from the model.
-        #
-        #   @return [String]
-        required :text, String
-
-        # @!attribute type
-        #   The type of the output text. Always `output_text`.
-        #
-        #   @return [Symbol, :output_text]
-        required :type, const: :output_text
-
-        # @!attribute logprobs
-        #
-        #   @return [Array<OpenAI::Models::Conversations::LobProb>, nil]
-        optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::LobProb] }
-
-        # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
-        #   @param annotations [Array<OpenAI::Models::Conversations::FileCitationBody, OpenAI::Models::Conversations::URLCitationBody, OpenAI::Models::Conversations::ContainerFileCitationBody>] The annotations of the text output.
-        #
-        #   @param text [String] The text output from the model.
-        #
-        #   @param logprobs [Array<OpenAI::Models::Conversations::LobProb>]
-        #
-        #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
-        module Annotation
-          extend OpenAI::Internal::Type::Union
-
-          discriminator :type
-
-          variant :file_citation, -> { OpenAI::Conversations::FileCitationBody }
-
-          variant :url_citation, -> { OpenAI::Conversations::URLCitationBody }
-
-          variant :container_file_citation, -> { OpenAI::Conversations::ContainerFileCitationBody }
-
-          # @!method self.variants
-          #   @return [Array(OpenAI::Models::Conversations::FileCitationBody, OpenAI::Models::Conversations::URLCitationBody, OpenAI::Models::Conversations::ContainerFileCitationBody)]
-        end
-      end
+      OutputTextContent = OpenAI::Models::Responses::ResponseOutputText
     end
   end
 end
diff --git a/lib/openai/models/conversations/refusal_content.rb b/lib/openai/models/conversations/refusal_content.rb
index 2b1cdce3..289496ce 100644
--- a/lib/openai/models/conversations/refusal_content.rb
+++ b/lib/openai/models/conversations/refusal_content.rb
@@ -3,24 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class RefusalContent < OpenAI::Internal::Type::BaseModel
-        # @!attribute refusal
-        #   The refusal explanation from the model.
-        #
-        #   @return [String]
-        required :refusal, String
-
-        # @!attribute type
-        #   The type of the refusal. Always `refusal`.
-        #
-        #   @return [Symbol, :refusal]
-        required :type, const: :refusal
-
-        # @!method initialize(refusal:, type: :refusal)
-        #   @param refusal [String] The refusal explanation from the model.
-        #
-        #   @param type [Symbol, :refusal] The type of the refusal. Always `refusal`.
-      end
+      RefusalContent = OpenAI::Models::Responses::ResponseOutputRefusal
     end
   end
 end
diff --git a/lib/openai/models/conversations/summary_text_content.rb b/lib/openai/models/conversations/summary_text_content.rb
index e3768df9..6959ef34 100644
--- a/lib/openai/models/conversations/summary_text_content.rb
+++ b/lib/openai/models/conversations/summary_text_content.rb
@@ -15,6 +15,8 @@ class SummaryTextContent < OpenAI::Internal::Type::BaseModel
        required :type, const: :summary_text
 
        # @!method initialize(text:, type: :summary_text)
+        #   A summary text from the model.
+        #
        #   @param text [String]
        #   @param type [Symbol, :summary_text]
      end
diff --git a/lib/openai/models/conversations/text_content.rb b/lib/openai/models/conversations/text_content.rb
index 3fa27b08..1271c84d 100644
--- a/lib/openai/models/conversations/text_content.rb
+++ b/lib/openai/models/conversations/text_content.rb
@@ -15,6 +15,8 @@ class TextContent < OpenAI::Internal::Type::BaseModel
        required :type, const: :text
 
        # @!method initialize(text:, type: :text)
+        #   A text content.
+        #
        #   @param text [String]
        #   @param type [Symbol, :text]
      end
diff --git a/lib/openai/models/conversations/top_log_prob.rb b/lib/openai/models/conversations/top_log_prob.rb
deleted file mode 100644
index 4677b3bf..00000000
--- a/lib/openai/models/conversations/top_log_prob.rb
+++ /dev/null
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-module OpenAI
-  module Models
-    module Conversations
-      class TopLogProb < OpenAI::Internal::Type::BaseModel
-        # @!attribute token
-        #
-        #   @return [String]
-        required :token, String
-
-        # @!attribute bytes
-        #
-        #   @return [Array<Integer>]
-        required :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
-
-        # @!attribute logprob
-        #
-        #   @return [Float]
-        required :logprob, Float
-
-        # @!method initialize(token:, bytes:, logprob:)
-        #   @param token [String]
-        #   @param bytes [Array<Integer>]
-        #   @param logprob [Float]
-      end
-    end
-  end
-end
diff --git a/lib/openai/models/conversations/url_citation_body.rb b/lib/openai/models/conversations/url_citation_body.rb
deleted file mode 100644
index bab5ffdf..00000000
--- a/lib/openai/models/conversations/url_citation_body.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-# frozen_string_literal: true
-
-module OpenAI
-  module Models
-    module Conversations
-      class URLCitationBody < OpenAI::Internal::Type::BaseModel
-        # @!attribute end_index
-        #   The index of the last character of the URL citation in the message.
-        #
-        #   @return [Integer]
-        required :end_index, Integer
-
-        # @!attribute start_index
-        #   The index of the first character of the URL citation in the message.
-        #
-        #   @return [Integer]
-        required :start_index, Integer
-
-        # @!attribute title
-        #   The title of the web resource.
-        #
-        #   @return [String]
-        required :title, String
-
-        # @!attribute type
-        #   The type of the URL citation. Always `url_citation`.
-        #
-        #   @return [Symbol, :url_citation]
-        required :type, const: :url_citation
-
-        # @!attribute url
-        #   The URL of the web resource.
-        #
-        #   @return [String]
-        required :url, String
-
-        # @!method initialize(end_index:, start_index:, title:, url:, type: :url_citation)
-        #   @param end_index [Integer] The index of the last character of the URL citation in the message.
-        #
-        #   @param start_index [Integer] The index of the first character of the URL citation in the message.
-        #
-        #   @param title [String] The title of the web resource.
-        #
-        #   @param url [String] The URL of the web resource.
-        #
-        #   @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`.
-      end
-    end
-  end
-end
diff --git a/lib/openai/models/evals/create_eval_completions_run_data_source.rb b/lib/openai/models/evals/create_eval_completions_run_data_source.rb
index 0c72035e..24e5106e 100644
--- a/lib/openai/models/evals/create_eval_completions_run_data_source.rb
+++ b/lib/openai/models/evals/create_eval_completions_run_data_source.rb
@@ -459,6 +459,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute response_format
          #   An object specifying the format that the model must output.
          #
@@ -501,13 +511,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for
          #   more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
diff --git a/lib/openai/models/evals/run_cancel_response.rb b/lib/openai/models/evals/run_cancel_response.rb
index 4c97c294..f3886959 100644
--- a/lib/openai/models/evals/run_cancel_response.rb
+++ b/lib/openai/models/evals/run_cancel_response.rb
@@ -654,6 +654,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute seed
          #   A seed value to initialize the randomness, during sampling.
          #
@@ -702,13 +712,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
          #   for more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
          #
          #   @param temperature [Float] A higher temperature increases randomness in the outputs.
diff --git a/lib/openai/models/evals/run_create_params.rb b/lib/openai/models/evals/run_create_params.rb
index 04477bef..739dfe6f 100644
--- a/lib/openai/models/evals/run_create_params.rb
+++ b/lib/openai/models/evals/run_create_params.rb
@@ -582,6 +582,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute seed
          #   A seed value to initialize the randomness, during sampling.
          #
@@ -630,13 +640,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams}
          #   for more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
          #
          #   @param temperature [Float] A higher temperature increases randomness in the outputs.
diff --git a/lib/openai/models/evals/run_create_response.rb b/lib/openai/models/evals/run_create_response.rb
index 9e7dce76..0dd7985c 100644
--- a/lib/openai/models/evals/run_create_response.rb
+++ b/lib/openai/models/evals/run_create_response.rb
@@ -654,6 +654,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute seed
          #   A seed value to initialize the randomness, during sampling.
          #
@@ -702,13 +712,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams}
          #   for more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
          #
          #   @param temperature [Float] A higher temperature increases randomness in the outputs.
diff --git a/lib/openai/models/evals/run_list_response.rb b/lib/openai/models/evals/run_list_response.rb
index a7082381..0789f15a 100644
--- a/lib/openai/models/evals/run_list_response.rb
+++ b/lib/openai/models/evals/run_list_response.rb
@@ -654,6 +654,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute seed
          #   A seed value to initialize the randomness, during sampling.
          #
@@ -701,13 +711,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams}
          #   for more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
          #
          #   @param temperature [Float] A higher temperature increases randomness in the outputs.
diff --git a/lib/openai/models/evals/run_retrieve_response.rb b/lib/openai/models/evals/run_retrieve_response.rb
index f55b5ef0..f531c975 100644
--- a/lib/openai/models/evals/run_retrieve_response.rb
+++ b/lib/openai/models/evals/run_retrieve_response.rb
@@ -658,6 +658,16 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Integer, nil]
          optional :max_completion_tokens, Integer
 
+          # @!attribute reasoning_effort
+          #   Constrains effort on reasoning for
+          #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          #   effort can result in faster responses and fewer tokens used on reasoning in a
+          #   response.
+          #
+          #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+          optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
          # @!attribute seed
          #   A seed value to initialize the randomness, during sampling.
          #
@@ -706,13 +716,15 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
          #   @return [Float, nil]
          optional :top_p, Float
 
-          # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+          # @!method initialize(max_completion_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams}
          #   for more details.
          #
          #   @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
          #
+          #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+          #
          #   @param seed [Integer] A seed value to initialize the randomness, during sampling.
          #
          #   @param temperature [Float] A higher temperature increases randomness in the outputs.
diff --git a/lib/openai/models/evals/runs/output_item_list_response.rb b/lib/openai/models/evals/runs/output_item_list_response.rb
index d3271c97..5e49c767 100644
--- a/lib/openai/models/evals/runs/output_item_list_response.rb
+++ b/lib/openai/models/evals/runs/output_item_list_response.rb
@@ -43,11 +43,11 @@ class OutputItemListResponse < OpenAI::Internal::Type::BaseModel
        required :object, const: :"eval.run.output_item"
 
        # @!attribute results
-        #   A list of results from the evaluation run.
+        #   A list of grader results for this output item.
        #
-        #   @return [Array<Hash{Symbol=>Object}>]
+        #   @return [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Result>]
        required :results,
-                 OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
+                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result] }
 
        # @!attribute run_id
        #   The identifier of the evaluation run associated with this output item.
@@ -80,7 +80,7 @@ class OutputItemListResponse < OpenAI::Internal::Type::BaseModel
        #
        #   @param eval_id [String] The identifier of the evaluation group.
        #
-        #   @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
+        #   @param results [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Result>] A list of grader results for this output item.
        #
        #   @param run_id [String] The identifier of the evaluation run associated with this output item.
        #
@@ -90,6 +90,51 @@ class OutputItemListResponse < OpenAI::Internal::Type::BaseModel
        #
        #   @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
 
+        class Result < OpenAI::Internal::Type::BaseModel
+          # @!attribute name
+          #   The name of the grader.
+          #
+          #   @return [String]
+          required :name, String
+
+          # @!attribute passed
+          #   Whether the grader considered the output a pass.
+          #
+          #   @return [Boolean]
+          required :passed, OpenAI::Internal::Type::Boolean
+
+          # @!attribute score
+          #   The numeric score produced by the grader.
+          #
+          #   @return [Float]
+          required :score, Float
+
+          # @!attribute sample
+          #   Optional sample or intermediate data produced by the grader.
+          #
+          #   @return [Hash{Symbol=>Object}, nil]
+          optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], nil?: true
+
+          # @!attribute type
+          #   The grader type (for example, "string-check-grader").
+          #
+          #   @return [String, nil]
+          optional :type, String
+
+          # @!method initialize(name:, passed:, score:, sample: nil, type: nil)
+          #   A single grader result for an evaluation run output item.
+          #
+          #   @param name [String] The name of the grader.
+          #
+          #   @param passed [Boolean] Whether the grader considered the output a pass.
+          #
+          #   @param score [Float] The numeric score produced by the grader.
+          #
+          #   @param sample [Hash{Symbol=>Object}, nil] Optional sample or intermediate data produced by the grader.
+          #
+          #   @param type [String] The grader type (for example, "string-check-grader").
+        end
+
        # @see OpenAI::Models::Evals::Runs::OutputItemListResponse#sample
        class Sample < OpenAI::Internal::Type::BaseModel
          # @!attribute error
diff --git a/lib/openai/models/evals/runs/output_item_retrieve_response.rb b/lib/openai/models/evals/runs/output_item_retrieve_response.rb
index e43f1fcf..c18cc0de 100644
--- a/lib/openai/models/evals/runs/output_item_retrieve_response.rb
+++ b/lib/openai/models/evals/runs/output_item_retrieve_response.rb
@@ -43,11 +43,11 @@ class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel
        required :object, const: :"eval.run.output_item"
 
        # @!attribute results
-        #   A list of results from the evaluation run.
+        #   A list of grader results for this output item.
        #
-        #   @return [Array<Hash{Symbol=>Object}>]
+        #   @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result>]
        required :results,
-                 OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
+                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result] }
 
        # @!attribute run_id
        #   The identifier of the evaluation run associated with this output item.
@@ -80,7 +80,7 @@ class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel
        #
        #   @param eval_id [String] The identifier of the evaluation group.
        #
-        #   @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
+        #   @param results [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result>] A list of grader results for this output item.
        #
        #   @param run_id [String] The identifier of the evaluation run associated with this output item.
        #
@@ -90,6 +90,51 @@ class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel
        #
        #   @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
 
+        class Result < OpenAI::Internal::Type::BaseModel
+          # @!attribute name
+          #   The name of the grader.
+          #
+          #   @return [String]
+          required :name, String
+
+          # @!attribute passed
+          #   Whether the grader considered the output a pass.
+          #
+          #   @return [Boolean]
+          required :passed, OpenAI::Internal::Type::Boolean
+
+          # @!attribute score
+          #   The numeric score produced by the grader.
+          #
+          #   @return [Float]
+          required :score, Float
+
+          # @!attribute sample
+          #   Optional sample or intermediate data produced by the grader.
+          #
+          #   @return [Hash{Symbol=>Object}, nil]
+          optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], nil?: true
+
+          # @!attribute type
+          #   The grader type (for example, "string-check-grader").
+          #
+          #   @return [String, nil]
+          optional :type, String
+
+          # @!method initialize(name:, passed:, score:, sample: nil, type: nil)
+          #   A single grader result for an evaluation run output item.
+          #
+          #   @param name [String] The name of the grader.
+          #
+          #   @param passed [Boolean] Whether the grader considered the output a pass.
+          #
+          #   @param score [Float] The numeric score produced by the grader.
+          #
+          #   @param sample [Hash{Symbol=>Object}, nil] Optional sample or intermediate data produced by the grader.
+          #
+          #   @param type [String] The grader type (for example, "string-check-grader").
+        end
+
        # @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse#sample
        class Sample < OpenAI::Internal::Type::BaseModel
          # @!attribute error
diff --git a/lib/openai/models/graders/score_model_grader.rb b/lib/openai/models/graders/score_model_grader.rb
index 2998c298..eeb6d628 100644
--- a/lib/openai/models/graders/score_model_grader.rb
+++ b/lib/openai/models/graders/score_model_grader.rb
@@ -37,8 +37,8 @@ class ScoreModelGrader < OpenAI::Internal::Type::BaseModel
      # @!attribute sampling_params
      #   The sampling parameters for the model.
      #
-      #   @return [Object, nil]
-      optional :sampling_params, OpenAI::Internal::Type::Unknown
+      #   @return [OpenAI::Models::Graders::ScoreModelGrader::SamplingParams, nil]
+      optional :sampling_params, -> { OpenAI::Graders::ScoreModelGrader::SamplingParams }
 
      # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model)
      #   A ScoreModelGrader object that uses a model to assign a score to the input.
@@ -51,7 +51,7 @@ class ScoreModelGrader < OpenAI::Internal::Type::BaseModel
      #
      #   @param range [Array] The range of the score. Defaults to `[0, 1]`.
      #
-      #   @param sampling_params [Object] The sampling parameters for the model.
+      #   @param sampling_params [OpenAI::Models::Graders::ScoreModelGrader::SamplingParams] The sampling parameters for the model.
      #
      #   @param type [Symbol, :score_model] The object type, which is always `score_model`.
@@ -210,6 +210,59 @@ module Type
          # @!method self.values
          #   @return [Array]
        end
      end
+
+      # @see OpenAI::Models::Graders::ScoreModelGrader#sampling_params
+      class SamplingParams < OpenAI::Internal::Type::BaseModel
+        # @!attribute max_completions_tokens
+        #   The maximum number of tokens the grader model may generate in its response.
+        #
+        #   @return [Integer, nil]
+        optional :max_completions_tokens, Integer, nil?: true
+
+        # @!attribute reasoning_effort
+        #   Constrains effort on reasoning for
+        #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+        #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        #   effort can result in faster responses and fewer tokens used on reasoning in a
+        #   response.
+        #
+        #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+        optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
+        # @!attribute seed
+        #   A seed value to initialize the randomness, during sampling.
+        #
+        #   @return [Integer, nil]
+        optional :seed, Integer, nil?: true
+
+        # @!attribute temperature
+        #   A higher temperature increases randomness in the outputs.
+        #
+        #   @return [Float, nil]
+        optional :temperature, Float, nil?: true
+
+        # @!attribute top_p
+        #   An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+        #
+        #   @return [Float, nil]
+        optional :top_p, Float, nil?: true
+
+        # @!method initialize(max_completions_tokens: nil, reasoning_effort: nil, seed: nil, temperature: nil, top_p: nil)
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Graders::ScoreModelGrader::SamplingParams} for more details.
+        #
+        #   The sampling parameters for the model.
+        #
+        #   @param max_completions_tokens [Integer, nil] The maximum number of tokens the grader model may generate in its response.
+        #
+        #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+        #
+        #   @param seed [Integer, nil] A seed value to initialize the randomness, during sampling.
+        #
+        #   @param temperature [Float, nil] A higher temperature increases randomness in the outputs.
+        #
+        #   @param top_p [Float, nil] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+      end
     end
   end
diff --git a/lib/openai/version.rb b/lib/openai/version.rb
index 2a63a5d1..b4bf7527 100644
--- a/lib/openai/version.rb
+++ b/lib/openai/version.rb
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module OpenAI
-  VERSION = "0.23.3"
+  VERSION = "0.24.0"
 end
diff --git a/rbi/openai/errors.rbi b/rbi/openai/errors.rbi
index b3cfeb33..01fde4ef 100644
--- a/rbi/openai/errors.rbi
+++ b/rbi/openai/errors.rbi
@@ -33,6 +33,9 @@ module OpenAI
      sig { returns(T.nilable(Integer)) }
      attr_accessor :status
 
+      sig { returns(T.nilable(T::Hash[String, String])) }
+      attr_accessor :headers
+
      sig { returns(T.nilable(T.anything)) }
      attr_accessor :body
 
@@ -50,6 +53,7 @@ module OpenAI
        params(
          url: URI::Generic,
          status: T.nilable(Integer),
+          headers: T.nilable(T::Hash[String, String]),
          body: T.nilable(Object),
          request: NilClass,
          response: NilClass,
@@ -59,6 +63,7 @@ module OpenAI
      def self.new(
        url:,
        status: nil,
+        headers: nil,
        body: nil,
        request: nil,
        response: nil,
@@ -88,6 +93,7 @@ module OpenAI
        params(
          url: URI::Generic,
          status: NilClass,
+          headers: T.nilable(T::Hash[String, String]),
          body: NilClass,
          request: NilClass,
          response: NilClass,
@@ -97,6 +103,7 @@ module OpenAI
      def self.new(
        url:,
        status: nil,
+        headers: nil,
        body: nil,
        request: nil,
        response: nil,
@@ -111,6 +118,7 @@ module OpenAI
        params(
          url: URI::Generic,
          status: NilClass,
+          headers: T.nilable(T::Hash[String, String]),
          body: NilClass,
          request: NilClass,
          response: NilClass,
@@ -120,6 +128,7 @@ module OpenAI
      def self.new(
        url:,
        status: nil,
+        headers: nil,
        body: nil,
        request: nil,
        response: nil,
@@ -134,13 +143,22 @@ module OpenAI
        params(
          url: URI::Generic,
          status: Integer,
+          headers: T.nilable(T::Hash[String, String]),
          body: T.nilable(Object),
          request: NilClass,
          response: NilClass,
          message: T.nilable(String)
        ).returns(T.attached_class)
      end
-      def self.for(url:, status:, body:, request:, response:, message: nil)
+      def self.for(
+        url:,
+        status:,
+        headers:,
+        body:,
+        request:,
+        response:,
+        message: nil
+      )
      end
 
      sig { returns(Integer) }
@@ -160,13 +178,22 @@ module OpenAI
        params(
          url: URI::Generic,
          status: Integer,
+          headers: T.nilable(T::Hash[String, String]),
          body: T.nilable(Object),
          request: NilClass,
          response: NilClass,
          message: T.nilable(String)
        ).returns(T.attached_class)
      end
-      def self.new(url:, status:, body:, request:, response:, message: nil)
status:, body:, request:, response:, message: nil) + def self.new( + url:, + status:, + headers:, + body:, + request:, + response:, + message: nil + ) end end diff --git a/rbi/openai/internal/transport/base_client.rbi b/rbi/openai/internal/transport/base_client.rbi index 095d4476..a15bc545 100644 --- a/rbi/openai/internal/transport/base_client.rbi +++ b/rbi/openai/internal/transport/base_client.rbi @@ -92,10 +92,9 @@ module OpenAI # @api private sig do - params( - status: Integer, - headers: T.any(T::Hash[String, String], Net::HTTPHeader) - ).returns(T::Boolean) + params(status: Integer, headers: T::Hash[String, String]).returns( + T::Boolean + ) end def should_retry?(status, headers:) end @@ -105,7 +104,7 @@ module OpenAI params( request: OpenAI::Internal::Transport::BaseClient::RequestInput, status: Integer, - response_headers: T.any(T::Hash[String, String], Net::HTTPHeader) + response_headers: T::Hash[String, String] ).returns(OpenAI::Internal::Transport::BaseClient::RequestInput) end def follow_redirect(request, status:, response_headers:) diff --git a/rbi/openai/internal/type/base_page.rbi b/rbi/openai/internal/type/base_page.rbi index c097c095..19a344a0 100644 --- a/rbi/openai/internal/type/base_page.rbi +++ b/rbi/openai/internal/type/base_page.rbi @@ -30,7 +30,7 @@ module OpenAI params( client: OpenAI::Internal::Transport::BaseClient, req: OpenAI::Internal::Transport::BaseClient::RequestComponents, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), + headers: T::Hash[String, String], page_data: T.anything ).void end diff --git a/rbi/openai/internal/type/base_stream.rbi b/rbi/openai/internal/type/base_stream.rbi index e1155943..16a4cdf0 100644 --- a/rbi/openai/internal/type/base_stream.rbi +++ b/rbi/openai/internal/type/base_stream.rbi @@ -27,6 +27,12 @@ module OpenAI end end + sig { returns(Integer) } + attr_reader :status + + sig { returns(T::Hash[String, String]) } + attr_reader :headers + sig { void } def close end @@ -51,6 +57,7 @@ module OpenAI T.any(T::Class[T.anything], OpenAI::Internal::Type::Converter), url: URI::Generic, status: Integer, + headers: T::Hash[String, String], response: Net::HTTPResponse, unwrap: T.any( @@ -62,7 +69,15 @@ module OpenAI stream: T::Enumerable[Message] ).void end - def initialize(model:, url:, status:, response:, unwrap:, stream:) + def initialize( + model:, + url:, + status:, + headers:, + response:, + unwrap:, + stream: + ) end # @api private diff --git a/rbi/openai/internal/util.rbi b/rbi/openai/internal/util.rbi index 69ba15c5..3ee6eb00 100644 --- a/rbi/openai/internal/util.rbi +++ b/rbi/openai/internal/util.rbi @@ -361,7 +361,7 @@ module OpenAI # Assumes each chunk in stream has `Encoding::BINARY`. sig do params( - headers: T.any(T::Hash[String, String], Net::HTTPHeader), + headers: T::Hash[String, String], stream: T::Enumerable[String], suppress_error: T::Boolean ).returns(T.anything) diff --git a/rbi/openai/models/conversations/computer_screenshot_content.rbi b/rbi/openai/models/conversations/computer_screenshot_content.rbi index 50a29357..1ca4c771 100644 --- a/rbi/openai/models/conversations/computer_screenshot_content.rbi +++ b/rbi/openai/models/conversations/computer_screenshot_content.rbi @@ -25,6 +25,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A screenshot of a computer. 
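+ # It references the captured image, for example by file ID.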
sig do params( file_id: T.nilable(String), diff --git a/rbi/openai/models/conversations/container_file_citation_body.rbi b/rbi/openai/models/conversations/container_file_citation_body.rbi deleted file mode 100644 index d828926e..00000000 --- a/rbi/openai/models/conversations/container_file_citation_body.rbi +++ /dev/null @@ -1,82 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Conversations - class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::ContainerFileCitationBody, - OpenAI::Internal::AnyHash - ) - end - - # The ID of the container file. - sig { returns(String) } - attr_accessor :container_id - - # The index of the last character of the container file citation in the message. - sig { returns(Integer) } - attr_accessor :end_index - - # The ID of the file. - sig { returns(String) } - attr_accessor :file_id - - # The filename of the container file cited. - sig { returns(String) } - attr_accessor :filename - - # The index of the first character of the container file citation in the message. - sig { returns(Integer) } - attr_accessor :start_index - - # The type of the container file citation. Always `container_file_citation`. - sig { returns(Symbol) } - attr_accessor :type - - sig do - params( - container_id: String, - end_index: Integer, - file_id: String, - filename: String, - start_index: Integer, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The ID of the container file. - container_id:, - # The index of the last character of the container file citation in the message. - end_index:, - # The ID of the file. - file_id:, - # The filename of the container file cited. - filename:, - # The index of the first character of the container file citation in the message. - start_index:, - # The type of the container file citation. Always `container_file_citation`. - type: :container_file_citation - ) - end - - sig do - override.returns( - { - container_id: String, - end_index: Integer, - file_id: String, - filename: String, - start_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/conversations/file_citation_body.rbi b/rbi/openai/models/conversations/file_citation_body.rbi deleted file mode 100644 index ec14bf91..00000000 --- a/rbi/openai/models/conversations/file_citation_body.rbi +++ /dev/null @@ -1,61 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Conversations - class FileCitationBody < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::FileCitationBody, - OpenAI::Internal::AnyHash - ) - end - - # The ID of the file. - sig { returns(String) } - attr_accessor :file_id - - # The filename of the file cited. - sig { returns(String) } - attr_accessor :filename - - # The index of the file in the list of files. - sig { returns(Integer) } - attr_accessor :index - - # The type of the file citation. Always `file_citation`. - sig { returns(Symbol) } - attr_accessor :type - - sig do - params( - file_id: String, - filename: String, - index: Integer, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The ID of the file. - file_id:, - # The filename of the file cited. - filename:, - # The index of the file in the list of files. - index:, - # The type of the file citation. Always `file_citation`. 
- type: :file_citation - ) - end - - sig do - override.returns( - { file_id: String, filename: String, index: Integer, type: Symbol } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/conversations/input_file_content.rbi b/rbi/openai/models/conversations/input_file_content.rbi index 5516a933..a354464e 100644 --- a/rbi/openai/models/conversations/input_file_content.rbi +++ b/rbi/openai/models/conversations/input_file_content.rbi @@ -3,70 +3,7 @@ module OpenAI module Models module Conversations - class InputFileContent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::InputFileContent, - OpenAI::Internal::AnyHash - ) - end - - # The ID of the file to be sent to the model. - sig { returns(T.nilable(String)) } - attr_accessor :file_id - - # The type of the input item. Always `input_file`. - sig { returns(Symbol) } - attr_accessor :type - - # The URL of the file to be sent to the model. - sig { returns(T.nilable(String)) } - attr_reader :file_url - - sig { params(file_url: String).void } - attr_writer :file_url - - # The name of the file to be sent to the model. - sig { returns(T.nilable(String)) } - attr_reader :filename - - sig { params(filename: String).void } - attr_writer :filename - - sig do - params( - file_id: T.nilable(String), - file_url: String, - filename: String, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The ID of the file to be sent to the model. - file_id:, - # The URL of the file to be sent to the model. - file_url: nil, - # The name of the file to be sent to the model. - filename: nil, - # The type of the input item. Always `input_file`. - type: :input_file - ) - end - - sig do - override.returns( - { - file_id: T.nilable(String), - type: Symbol, - file_url: String, - filename: String - } - ) - end - def to_hash - end - end + InputFileContent = OpenAI::Models::Responses::ResponseInputFile end end end diff --git a/rbi/openai/models/conversations/input_image_content.rbi b/rbi/openai/models/conversations/input_image_content.rbi index b9b48a84..41cdf39b 100644 --- a/rbi/openai/models/conversations/input_image_content.rbi +++ b/rbi/openai/models/conversations/input_image_content.rbi @@ -3,111 +3,7 @@ module OpenAI module Models module Conversations - class InputImageContent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::InputImageContent, - OpenAI::Internal::AnyHash - ) - end - - # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. - sig do - returns( - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol - ) - end - attr_accessor :detail - - # The ID of the file to be sent to the model. - sig { returns(T.nilable(String)) } - attr_accessor :file_id - - # The URL of the image to be sent to the model. A fully qualified URL or base64 - # encoded image in a data URL. - sig { returns(T.nilable(String)) } - attr_accessor :image_url - - # The type of the input item. Always `input_image`. - sig { returns(Symbol) } - attr_accessor :type - - sig do - params( - detail: OpenAI::Conversations::InputImageContent::Detail::OrSymbol, - file_id: T.nilable(String), - image_url: T.nilable(String), - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. - detail:, - # The ID of the file to be sent to the model. 
- file_id:, - # The URL of the image to be sent to the model. A fully qualified URL or base64 - # encoded image in a data URL. - image_url:, - # The type of the input item. Always `input_image`. - type: :input_image - ) - end - - sig do - override.returns( - { - detail: - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol, - file_id: T.nilable(String), - image_url: T.nilable(String), - type: Symbol - } - ) - end - def to_hash - end - - # The detail level of the image to be sent to the model. One of `high`, `low`, or - # `auto`. Defaults to `auto`. - module Detail - extend OpenAI::Internal::Type::Enum - - TaggedSymbol = - T.type_alias do - T.all(Symbol, OpenAI::Conversations::InputImageContent::Detail) - end - OrSymbol = T.type_alias { T.any(Symbol, String) } - - LOW = - T.let( - :low, - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol - ) - HIGH = - T.let( - :high, - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol - ) - AUTO = - T.let( - :auto, - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol - ) - - sig do - override.returns( - T::Array[ - OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol - ] - ) - end - def self.values - end - end - end + InputImageContent = OpenAI::Models::Responses::ResponseInputImage end end end diff --git a/rbi/openai/models/conversations/input_text_content.rbi b/rbi/openai/models/conversations/input_text_content.rbi index 1f8e0760..e7df350c 100644 --- a/rbi/openai/models/conversations/input_text_content.rbi +++ b/rbi/openai/models/conversations/input_text_content.rbi @@ -3,36 +3,7 @@ module OpenAI module Models module Conversations - class InputTextContent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::InputTextContent, - OpenAI::Internal::AnyHash - ) - end - - # The text input to the model. - sig { returns(String) } - attr_accessor :text - - # The type of the input item. Always `input_text`. - sig { returns(Symbol) } - attr_accessor :type - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new( - # The text input to the model. - text:, - # The type of the input item. Always `input_text`. 
- type: :input_text - ) - end - - sig { override.returns({ text: String, type: Symbol }) } - def to_hash - end - end + InputTextContent = OpenAI::Models::Responses::ResponseInputText end end end diff --git a/rbi/openai/models/conversations/lob_prob.rbi b/rbi/openai/models/conversations/lob_prob.rbi deleted file mode 100644 index 737d318b..00000000 --- a/rbi/openai/models/conversations/lob_prob.rbi +++ /dev/null @@ -1,50 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Conversations - class LobProb < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any(OpenAI::Conversations::LobProb, OpenAI::Internal::AnyHash) - end - - sig { returns(String) } - attr_accessor :token - - sig { returns(T::Array[Integer]) } - attr_accessor :bytes - - sig { returns(Float) } - attr_accessor :logprob - - sig { returns(T::Array[OpenAI::Conversations::TopLogProb]) } - attr_accessor :top_logprobs - - sig do - params( - token: String, - bytes: T::Array[Integer], - logprob: Float, - top_logprobs: T::Array[OpenAI::Conversations::TopLogProb::OrHash] - ).returns(T.attached_class) - end - def self.new(token:, bytes:, logprob:, top_logprobs:) - end - - sig do - override.returns( - { - token: String, - bytes: T::Array[Integer], - logprob: Float, - top_logprobs: T::Array[OpenAI::Conversations::TopLogProb] - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/conversations/message.rbi b/rbi/openai/models/conversations/message.rbi index 6d6a4e07..2c08ba0a 100644 --- a/rbi/openai/models/conversations/message.rbi +++ b/rbi/openai/models/conversations/message.rbi @@ -33,20 +33,21 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A message to or from the model. sig do params( id: String, content: T::Array[ T.any( - OpenAI::Conversations::InputTextContent::OrHash, - OpenAI::Conversations::OutputTextContent::OrHash, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Responses::ResponseOutputText::OrHash, OpenAI::Conversations::TextContent::OrHash, OpenAI::Conversations::SummaryTextContent::OrHash, - OpenAI::Conversations::RefusalContent::OrHash, - OpenAI::Conversations::InputImageContent::OrHash, + OpenAI::Responses::ResponseOutputRefusal::OrHash, + OpenAI::Responses::ResponseInputImage::OrHash, OpenAI::Conversations::ComputerScreenshotContent::OrHash, - OpenAI::Conversations::InputFileContent::OrHash + OpenAI::Responses::ResponseInputFile::OrHash ) ], role: OpenAI::Conversations::Message::Role::OrSymbol, @@ -85,20 +86,21 @@ module OpenAI def to_hash end + # A text input to the model. 
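+ # (The union below also admits output text, refusals, images, files, and
+ # screenshot content, not only text input.)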
module Content extend OpenAI::Internal::Type::Union Variants = T.type_alias do T.any( - OpenAI::Conversations::InputTextContent, - OpenAI::Conversations::OutputTextContent, + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseOutputText, OpenAI::Conversations::TextContent, OpenAI::Conversations::SummaryTextContent, - OpenAI::Conversations::RefusalContent, - OpenAI::Conversations::InputImageContent, + OpenAI::Responses::ResponseOutputRefusal, + OpenAI::Responses::ResponseInputImage, OpenAI::Conversations::ComputerScreenshotContent, - OpenAI::Conversations::InputFileContent + OpenAI::Responses::ResponseInputFile ) end diff --git a/rbi/openai/models/conversations/output_text_content.rbi b/rbi/openai/models/conversations/output_text_content.rbi index f9c643c6..60fe960a 100644 --- a/rbi/openai/models/conversations/output_text_content.rbi +++ b/rbi/openai/models/conversations/output_text_content.rbi @@ -3,108 +3,7 @@ module OpenAI module Models module Conversations - class OutputTextContent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::OutputTextContent, - OpenAI::Internal::AnyHash - ) - end - - # The annotations of the text output. - sig do - returns( - T::Array[ - OpenAI::Conversations::OutputTextContent::Annotation::Variants - ] - ) - end - attr_accessor :annotations - - # The text output from the model. - sig { returns(String) } - attr_accessor :text - - # The type of the output text. Always `output_text`. - sig { returns(Symbol) } - attr_accessor :type - - sig { returns(T.nilable(T::Array[OpenAI::Conversations::LobProb])) } - attr_reader :logprobs - - sig do - params( - logprobs: T::Array[OpenAI::Conversations::LobProb::OrHash] - ).void - end - attr_writer :logprobs - - sig do - params( - annotations: - T::Array[ - T.any( - OpenAI::Conversations::FileCitationBody::OrHash, - OpenAI::Conversations::URLCitationBody::OrHash, - OpenAI::Conversations::ContainerFileCitationBody::OrHash - ) - ], - text: String, - logprobs: T::Array[OpenAI::Conversations::LobProb::OrHash], - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The annotations of the text output. - annotations:, - # The text output from the model. - text:, - logprobs: nil, - # The type of the output text. Always `output_text`. 
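+ # Plain text carried within a conversation message.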
- type: :output_text - ) - end - - sig do - override.returns( - { - annotations: - T::Array[ - OpenAI::Conversations::OutputTextContent::Annotation::Variants - ], - text: String, - type: Symbol, - logprobs: T::Array[OpenAI::Conversations::LobProb] - } - ) - end - def to_hash - end - - module Annotation - extend OpenAI::Internal::Type::Union - - Variants = - T.type_alias do - T.any( - OpenAI::Conversations::FileCitationBody, - OpenAI::Conversations::URLCitationBody, - OpenAI::Conversations::ContainerFileCitationBody - ) - end - - sig do - override.returns( - T::Array[ - OpenAI::Conversations::OutputTextContent::Annotation::Variants - ] - ) - end - def self.variants - end - end - end + OutputTextContent = OpenAI::Models::Responses::ResponseOutputText end end end diff --git a/rbi/openai/models/conversations/refusal_content.rbi b/rbi/openai/models/conversations/refusal_content.rbi index 77516323..ec2bd093 100644 --- a/rbi/openai/models/conversations/refusal_content.rbi +++ b/rbi/openai/models/conversations/refusal_content.rbi @@ -3,36 +3,7 @@ module OpenAI module Models module Conversations - class RefusalContent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::RefusalContent, - OpenAI::Internal::AnyHash - ) - end - - # The refusal explanation from the model. - sig { returns(String) } - attr_accessor :refusal - - # The type of the refusal. Always `refusal`. - sig { returns(Symbol) } - attr_accessor :type - - sig { params(refusal: String, type: Symbol).returns(T.attached_class) } - def self.new( - # The refusal explanation from the model. - refusal:, - # The type of the refusal. Always `refusal`. - type: :refusal - ) - end - - sig { override.returns({ refusal: String, type: Symbol }) } - def to_hash - end - end + RefusalContent = OpenAI::Models::Responses::ResponseOutputRefusal end end end diff --git a/rbi/openai/models/conversations/summary_text_content.rbi b/rbi/openai/models/conversations/summary_text_content.rbi index 7292fa8d..4e98991d 100644 --- a/rbi/openai/models/conversations/summary_text_content.rbi +++ b/rbi/openai/models/conversations/summary_text_content.rbi @@ -18,6 +18,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A summary text from the model. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :summary_text) end diff --git a/rbi/openai/models/conversations/text_content.rbi b/rbi/openai/models/conversations/text_content.rbi index abb7a442..219fca89 100644 --- a/rbi/openai/models/conversations/text_content.rbi +++ b/rbi/openai/models/conversations/text_content.rbi @@ -15,6 +15,7 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # A text content. 
sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/openai/models/conversations/top_log_prob.rbi b/rbi/openai/models/conversations/top_log_prob.rbi deleted file mode 100644 index c11b651a..00000000 --- a/rbi/openai/models/conversations/top_log_prob.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Conversations - class TopLogProb < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any(OpenAI::Conversations::TopLogProb, OpenAI::Internal::AnyHash) - end - - sig { returns(String) } - attr_accessor :token - - sig { returns(T::Array[Integer]) } - attr_accessor :bytes - - sig { returns(Float) } - attr_accessor :logprob - - sig do - params( - token: String, - bytes: T::Array[Integer], - logprob: Float - ).returns(T.attached_class) - end - def self.new(token:, bytes:, logprob:) - end - - sig do - override.returns( - { token: String, bytes: T::Array[Integer], logprob: Float } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/conversations/url_citation_body.rbi b/rbi/openai/models/conversations/url_citation_body.rbi deleted file mode 100644 index 4c34ad3d..00000000 --- a/rbi/openai/models/conversations/url_citation_body.rbi +++ /dev/null @@ -1,74 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Conversations - class URLCitationBody < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Conversations::URLCitationBody, - OpenAI::Internal::AnyHash - ) - end - - # The index of the last character of the URL citation in the message. - sig { returns(Integer) } - attr_accessor :end_index - - # The index of the first character of the URL citation in the message. - sig { returns(Integer) } - attr_accessor :start_index - - # The title of the web resource. - sig { returns(String) } - attr_accessor :title - - # The type of the URL citation. Always `url_citation`. - sig { returns(Symbol) } - attr_accessor :type - - # The URL of the web resource. - sig { returns(String) } - attr_accessor :url - - sig do - params( - end_index: Integer, - start_index: Integer, - title: String, - url: String, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The index of the last character of the URL citation in the message. - end_index:, - # The index of the first character of the URL citation in the message. - start_index:, - # The title of the web resource. - title:, - # The URL of the web resource. - url:, - # The type of the URL citation. Always `url_citation`. - type: :url_citation - ) - end - - sig do - override.returns( - { - end_index: Integer, - start_index: Integer, - title: String, - type: Symbol, - url: String - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi index 62adad40..c04a1c2f 100644 --- a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +++ b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi @@ -887,6 +887,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + # An object specifying the format that the model must output. # # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -963,6 +971,7 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( OpenAI::ResponseFormatText::OrHash, @@ -978,6 +987,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # An object specifying the format that the model must output. # # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1006,6 +1021,7 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), response_format: T.any( OpenAI::ResponseFormatText, diff --git a/rbi/openai/models/evals/run_cancel_response.rbi b/rbi/openai/models/evals/run_cancel_response.rbi index cb502fe8..6502568e 100644 --- a/rbi/openai/models/evals/run_cancel_response.rbi +++ b/rbi/openai/models/evals/run_cancel_response.rbi @@ -1115,6 +1115,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } + attr_accessor :reasoning_effort + # A seed value to initialize the randomness, during sampling. sig { returns(T.nilable(Integer)) } attr_reader :seed @@ -1201,6 +1209,8 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: @@ -1226,6 +1236,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, # A higher temperature increases randomness in the outputs. 
@@ -1260,6 +1276,8 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::TaggedSymbol), seed: Integer, temperature: Float, text: diff --git a/rbi/openai/models/evals/run_create_params.rbi b/rbi/openai/models/evals/run_create_params.rbi index 52dead66..f772910e 100644 --- a/rbi/openai/models/evals/run_create_params.rbi +++ b/rbi/openai/models/evals/run_create_params.rbi @@ -1073,6 +1073,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + # A seed value to initialize the randomness, during sampling. sig { returns(T.nilable(Integer)) } attr_reader :seed @@ -1176,6 +1184,8 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: @@ -1201,6 +1211,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, # A higher temperature increases randomness in the outputs. @@ -1235,6 +1251,8 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: diff --git a/rbi/openai/models/evals/run_create_response.rbi b/rbi/openai/models/evals/run_create_response.rbi index 1a8def7e..f127a170 100644 --- a/rbi/openai/models/evals/run_create_response.rbi +++ b/rbi/openai/models/evals/run_create_response.rbi @@ -1115,6 +1115,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } + attr_accessor :reasoning_effort + # A seed value to initialize the randomness, during sampling. sig { returns(T.nilable(Integer)) } attr_reader :seed @@ -1201,6 +1209,8 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: @@ -1226,6 +1236,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, # A higher temperature increases randomness in the outputs. @@ -1260,6 +1276,8 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::TaggedSymbol), seed: Integer, temperature: Float, text: diff --git a/rbi/openai/models/evals/run_list_response.rbi b/rbi/openai/models/evals/run_list_response.rbi index 54400783..2fba6c40 100644 --- a/rbi/openai/models/evals/run_list_response.rbi +++ b/rbi/openai/models/evals/run_list_response.rbi @@ -1111,6 +1111,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } + attr_accessor :reasoning_effort + # A seed value to initialize the randomness, during sampling. sig { returns(T.nilable(Integer)) } attr_reader :seed @@ -1197,6 +1205,8 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: @@ -1222,6 +1232,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, # A higher temperature increases randomness in the outputs. @@ -1256,6 +1272,8 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::TaggedSymbol), seed: Integer, temperature: Float, text: diff --git a/rbi/openai/models/evals/run_retrieve_response.rbi b/rbi/openai/models/evals/run_retrieve_response.rbi index c270c1d0..bf9b002e 100644 --- a/rbi/openai/models/evals/run_retrieve_response.rbi +++ b/rbi/openai/models/evals/run_retrieve_response.rbi @@ -1117,6 +1117,14 @@ module OpenAI sig { params(max_completion_tokens: Integer).void } attr_writer :max_completion_tokens + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) } + attr_accessor :reasoning_effort + # A seed value to initialize the randomness, during sampling. 
sig { returns(T.nilable(Integer)) } attr_reader :seed @@ -1203,6 +1211,8 @@ module OpenAI sig do params( max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::OrSymbol), seed: Integer, temperature: Float, text: @@ -1228,6 +1238,12 @@ module OpenAI def self.new( # The maximum number of tokens in the generated output. max_completion_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, # A seed value to initialize the randomness, during sampling. seed: nil, # A higher temperature increases randomness in the outputs. @@ -1262,6 +1278,8 @@ module OpenAI override.returns( { max_completion_tokens: Integer, + reasoning_effort: + T.nilable(OpenAI::ReasoningEffort::TaggedSymbol), seed: Integer, temperature: Float, text: diff --git a/rbi/openai/models/evals/runs/output_item_list_response.rbi b/rbi/openai/models/evals/runs/output_item_list_response.rbi index 9a34bb28..4d7b6338 100644 --- a/rbi/openai/models/evals/runs/output_item_list_response.rbi +++ b/rbi/openai/models/evals/runs/output_item_list_response.rbi @@ -37,8 +37,14 @@ module OpenAI sig { returns(Symbol) } attr_accessor :object - # A list of results from the evaluation run. - sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + # A list of grader results for this output item. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Result + ] + ) + end attr_accessor :results # The identifier of the evaluation run associated with this output item. @@ -71,7 +77,10 @@ module OpenAI datasource_item: T::Hash[Symbol, T.anything], datasource_item_id: Integer, eval_id: String, - results: T::Array[T::Hash[Symbol, T.anything]], + results: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Result::OrHash + ], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::OrHash, @@ -90,7 +99,7 @@ module OpenAI datasource_item_id:, # The identifier of the evaluation group. eval_id:, - # A list of results from the evaluation run. + # A list of grader results for this output item. results:, # The identifier of the evaluation run associated with this output item. run_id:, @@ -112,7 +121,10 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: Symbol, - results: T::Array[T::Hash[Symbol, T.anything]], + results: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Result + ], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, @@ -123,6 +135,77 @@ module OpenAI def to_hash end + class Result < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Result, + OpenAI::Internal::AnyHash + ) + end + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # Whether the grader considered the output a pass. + sig { returns(T::Boolean) } + attr_accessor :passed + + # The numeric score produced by the grader. + sig { returns(Float) } + attr_accessor :score + + # Optional sample or intermediate data produced by the grader. + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_accessor :sample + + # The grader type (for example, "string-check-grader"). 
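+ # Optional; some graders may not report a type.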
+ sig { returns(T.nilable(String)) } + attr_reader :type + + sig { params(type: String).void } + attr_writer :type + + # A single grader result for an evaluation run output item. + sig do + params( + name: String, + passed: T::Boolean, + score: Float, + sample: T.nilable(T::Hash[Symbol, T.anything]), + type: String + ).returns(T.attached_class) + end + def self.new( + # The name of the grader. + name:, + # Whether the grader considered the output a pass. + passed:, + # The numeric score produced by the grader. + score:, + # Optional sample or intermediate data produced by the grader. + sample: nil, + # The grader type (for example, "string-check-grader"). + type: nil + ) + end + + sig do + override.returns( + { + name: String, + passed: T::Boolean, + score: Float, + sample: T.nilable(T::Hash[Symbol, T.anything]), + type: String + } + ) + end + def to_hash + end + end + class Sample < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do diff --git a/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi b/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi index 8a39ad5e..f930fe06 100644 --- a/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi +++ b/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi @@ -37,8 +37,14 @@ module OpenAI sig { returns(Symbol) } attr_accessor :object - # A list of results from the evaluation run. - sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + # A list of grader results for this output item. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result + ] + ) + end attr_accessor :results # The identifier of the evaluation run associated with this output item. @@ -73,7 +79,10 @@ module OpenAI datasource_item: T::Hash[Symbol, T.anything], datasource_item_id: Integer, eval_id: String, - results: T::Array[T::Hash[Symbol, T.anything]], + results: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result::OrHash + ], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::OrHash, @@ -92,7 +101,7 @@ module OpenAI datasource_item_id:, # The identifier of the evaluation group. eval_id:, - # A list of results from the evaluation run. + # A list of grader results for this output item. results:, # The identifier of the evaluation run associated with this output item. run_id:, @@ -114,7 +123,10 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: Symbol, - results: T::Array[T::Hash[Symbol, T.anything]], + results: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result + ], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, @@ -125,6 +137,77 @@ module OpenAI def to_hash end + class Result < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result, + OpenAI::Internal::AnyHash + ) + end + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # Whether the grader considered the output a pass. + sig { returns(T::Boolean) } + attr_accessor :passed + + # The numeric score produced by the grader. + sig { returns(Float) } + attr_accessor :score + + # Optional sample or intermediate data produced by the grader. + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_accessor :sample + + # The grader type (for example, "string-check-grader"). 
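+ # Optional; when present it can be used to branch on the grader kind,
+ # e.g. `result.type == "string-check-grader"`.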
+ sig { returns(T.nilable(String)) } + attr_reader :type + + sig { params(type: String).void } + attr_writer :type + + # A single grader result for an evaluation run output item. + sig do + params( + name: String, + passed: T::Boolean, + score: Float, + sample: T.nilable(T::Hash[Symbol, T.anything]), + type: String + ).returns(T.attached_class) + end + def self.new( + # The name of the grader. + name:, + # Whether the grader considered the output a pass. + passed:, + # The numeric score produced by the grader. + score:, + # Optional sample or intermediate data produced by the grader. + sample: nil, + # The grader type (for example, "string-check-grader"). + type: nil + ) + end + + sig do + override.returns( + { + name: String, + passed: T::Boolean, + score: Float, + sample: T.nilable(T::Hash[Symbol, T.anything]), + type: String + } + ) + end + def to_hash + end + end + class Sample < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do diff --git a/rbi/openai/models/graders/score_model_grader.rbi b/rbi/openai/models/graders/score_model_grader.rbi index 9f57b6bf..8491c99e 100644 --- a/rbi/openai/models/graders/score_model_grader.rbi +++ b/rbi/openai/models/graders/score_model_grader.rbi @@ -35,10 +35,17 @@ module OpenAI attr_writer :range # The sampling parameters for the model. - sig { returns(T.nilable(T.anything)) } + sig do + returns(T.nilable(OpenAI::Graders::ScoreModelGrader::SamplingParams)) + end attr_reader :sampling_params - sig { params(sampling_params: T.anything).void } + sig do + params( + sampling_params: + OpenAI::Graders::ScoreModelGrader::SamplingParams::OrHash + ).void + end attr_writer :sampling_params # A ScoreModelGrader object that uses a model to assign a score to the input. @@ -48,7 +55,8 @@ module OpenAI model: String, name: String, range: T::Array[Float], - sampling_params: T.anything, + sampling_params: + OpenAI::Graders::ScoreModelGrader::SamplingParams::OrHash, type: Symbol ).returns(T.attached_class) end @@ -76,7 +84,7 @@ module OpenAI name: String, type: Symbol, range: T::Array[Float], - sampling_params: T.anything + sampling_params: OpenAI::Graders::ScoreModelGrader::SamplingParams } ) end @@ -372,6 +380,82 @@ module OpenAI end end end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::ScoreModelGrader::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens the grader model may generate in its response. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_completions_tokens + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_accessor :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # The sampling parameters for the model. 
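+ # All fields are optional and nilable; an illustrative construction (a
+ # sketch, not taken from upstream docs) might look like:
+ #
+ #   OpenAI::Graders::ScoreModelGrader::SamplingParams.new(
+ #     temperature: 0.0,
+ #     seed: 42,
+ #     reasoning_effort: :low
+ #   )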
+ sig do + params( + max_completions_tokens: T.nilable(Integer), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + seed: T.nilable(Integer), + temperature: T.nilable(Float), + top_p: T.nilable(Float) + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens the grader model may generate in its response. + max_completions_tokens: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # A seed value to initialize the randomness, during sampling. + seed: nil, + # A higher temperature increases randomness in the outputs. + temperature: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completions_tokens: T.nilable(Integer), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + seed: T.nilable(Integer), + temperature: T.nilable(Float), + top_p: T.nilable(Float) + } + ) + end + def to_hash + end + end end end end diff --git a/sig/openai/errors.rbs b/sig/openai/errors.rbs index 6bbd9f41..ff3cc5cb 100644 --- a/sig/openai/errors.rbs +++ b/sig/openai/errors.rbs @@ -21,6 +21,8 @@ module OpenAI attr_accessor status: Integer? + attr_accessor headers: ::Hash[String, String]? + attr_accessor body: top? attr_accessor code: String? @@ -32,6 +34,7 @@ module OpenAI def initialize: ( url: URI::Generic, ?status: Integer?, + ?headers: ::Hash[String, String]?, ?body: Object?, ?request: nil, ?response: nil, @@ -43,6 +46,7 @@ module OpenAI def initialize: ( url: URI::Generic, ?status: nil, + ?headers: ::Hash[String, String]?, ?body: nil, ?request: nil, ?response: nil, @@ -54,6 +58,7 @@ module OpenAI def initialize: ( url: URI::Generic, ?status: nil, + ?headers: ::Hash[String, String]?, ?body: nil, ?request: nil, ?response: nil, @@ -65,6 +70,7 @@ module OpenAI def self.for: ( url: URI::Generic, status: Integer, + headers: ::Hash[String, String]?, body: Object?, request: nil, response: nil, @@ -74,6 +80,7 @@ module OpenAI def initialize: ( url: URI::Generic, status: Integer, + headers: ::Hash[String, String]?, body: Object?, request: nil, response: nil, diff --git a/sig/openai/internal/type/base_stream.rbs b/sig/openai/internal/type/base_stream.rbs index 75f49297..e17cf86d 100644 --- a/sig/openai/internal/type/base_stream.rbs +++ b/sig/openai/internal/type/base_stream.rbs @@ -8,6 +8,10 @@ module OpenAI Enumerable[top] stream ) -> (^(Integer arg0) -> void) + attr_reader status: Integer + + attr_reader headers: ::Hash[String, String] + def close: -> void private def iterator: -> Enumerable[Elem] @@ -22,6 +26,7 @@ module OpenAI model: Class | OpenAI::Internal::Type::Converter, url: URI::Generic, status: Integer, + headers: ::Hash[String, String], response: top, unwrap: Symbol | Integer diff --git a/sig/openai/models/conversations/container_file_citation_body.rbs b/sig/openai/models/conversations/container_file_citation_body.rbs deleted file mode 100644 index c8d856d0..00000000 --- a/sig/openai/models/conversations/container_file_citation_body.rbs +++ /dev/null @@ -1,47 +0,0 @@ -module OpenAI - module Models - module Conversations - type container_file_citation_body = - { - container_id: String, - end_index: Integer, - file_id: String, - filename: String, - start_index: Integer, - type: 
:container_file_citation - } - - class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel - attr_accessor container_id: String - - attr_accessor end_index: Integer - - attr_accessor file_id: String - - attr_accessor filename: String - - attr_accessor start_index: Integer - - attr_accessor type: :container_file_citation - - def initialize: ( - container_id: String, - end_index: Integer, - file_id: String, - filename: String, - start_index: Integer, - ?type: :container_file_citation - ) -> void - - def to_hash: -> { - container_id: String, - end_index: Integer, - file_id: String, - filename: String, - start_index: Integer, - type: :container_file_citation - } - end - end - end -end diff --git a/sig/openai/models/conversations/file_citation_body.rbs b/sig/openai/models/conversations/file_citation_body.rbs deleted file mode 100644 index cfdd97ac..00000000 --- a/sig/openai/models/conversations/file_citation_body.rbs +++ /dev/null @@ -1,37 +0,0 @@ -module OpenAI - module Models - module Conversations - type file_citation_body = - { - file_id: String, - filename: String, - index: Integer, - type: :file_citation - } - - class FileCitationBody < OpenAI::Internal::Type::BaseModel - attr_accessor file_id: String - - attr_accessor filename: String - - attr_accessor index: Integer - - attr_accessor type: :file_citation - - def initialize: ( - file_id: String, - filename: String, - index: Integer, - ?type: :file_citation - ) -> void - - def to_hash: -> { - file_id: String, - filename: String, - index: Integer, - type: :file_citation - } - end - end - end -end diff --git a/sig/openai/models/conversations/input_file_content.rbs b/sig/openai/models/conversations/input_file_content.rbs index 69739208..d172ca47 100644 --- a/sig/openai/models/conversations/input_file_content.rbs +++ b/sig/openai/models/conversations/input_file_content.rbs @@ -1,41 +1,7 @@ module OpenAI module Models module Conversations - type input_file_content = - { - file_id: String?, - type: :input_file, - file_url: String, - filename: String - } - - class InputFileContent < OpenAI::Internal::Type::BaseModel - attr_accessor file_id: String? - - attr_accessor type: :input_file - - attr_reader file_url: String? - - def file_url=: (String) -> String - - attr_reader filename: String? - - def filename=: (String) -> String - - def initialize: ( - file_id: String?, - ?file_url: String, - ?filename: String, - ?type: :input_file - ) -> void - - def to_hash: -> { - file_id: String?, - type: :input_file, - file_url: String, - filename: String - } - end + class InputFileContent = OpenAI::Models::Responses::ResponseInputFile end end end diff --git a/sig/openai/models/conversations/input_image_content.rbs b/sig/openai/models/conversations/input_image_content.rbs index bfd75dcf..4b01a8a9 100644 --- a/sig/openai/models/conversations/input_image_content.rbs +++ b/sig/openai/models/conversations/input_image_content.rbs @@ -1,49 +1,7 @@ module OpenAI module Models module Conversations - type input_image_content = - { - detail: OpenAI::Models::Conversations::InputImageContent::detail, - file_id: String?, - image_url: String?, - type: :input_image - } - - class InputImageContent < OpenAI::Internal::Type::BaseModel - attr_accessor detail: OpenAI::Models::Conversations::InputImageContent::detail - - attr_accessor file_id: String? - - attr_accessor image_url: String? 
- - attr_accessor type: :input_image - - def initialize: ( - detail: OpenAI::Models::Conversations::InputImageContent::detail, - file_id: String?, - image_url: String?, - ?type: :input_image - ) -> void - - def to_hash: -> { - detail: OpenAI::Models::Conversations::InputImageContent::detail, - file_id: String?, - image_url: String?, - type: :input_image - } - - type detail = :low | :high | :auto - - module Detail - extend OpenAI::Internal::Type::Enum - - LOW: :low - HIGH: :high - AUTO: :auto - - def self?.values: -> ::Array[OpenAI::Models::Conversations::InputImageContent::detail] - end - end + class InputImageContent = OpenAI::Models::Responses::ResponseInputImage end end end diff --git a/sig/openai/models/conversations/input_text_content.rbs b/sig/openai/models/conversations/input_text_content.rbs index 59155bd2..f7ea7318 100644 --- a/sig/openai/models/conversations/input_text_content.rbs +++ b/sig/openai/models/conversations/input_text_content.rbs @@ -1,17 +1,7 @@ module OpenAI module Models module Conversations - type input_text_content = { text: String, type: :input_text } - - class InputTextContent < OpenAI::Internal::Type::BaseModel - attr_accessor text: String - - attr_accessor type: :input_text - - def initialize: (text: String, ?type: :input_text) -> void - - def to_hash: -> { text: String, type: :input_text } - end + class InputTextContent = OpenAI::Models::Responses::ResponseInputText end end end diff --git a/sig/openai/models/conversations/lob_prob.rbs b/sig/openai/models/conversations/lob_prob.rbs deleted file mode 100644 index 7d64c4d1..00000000 --- a/sig/openai/models/conversations/lob_prob.rbs +++ /dev/null @@ -1,37 +0,0 @@ -module OpenAI - module Models - module Conversations - type lob_prob = - { - token: String, - bytes: ::Array[Integer], - logprob: Float, - top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] - } - - class LobProb < OpenAI::Internal::Type::BaseModel - attr_accessor token: String - - attr_accessor bytes: ::Array[Integer] - - attr_accessor logprob: Float - - attr_accessor top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] - - def initialize: ( - token: String, - bytes: ::Array[Integer], - logprob: Float, - top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] - ) -> void - - def to_hash: -> { - token: String, - bytes: ::Array[Integer], - logprob: Float, - top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] - } - end - end - end -end diff --git a/sig/openai/models/conversations/message.rbs b/sig/openai/models/conversations/message.rbs index 370dc4c3..c441dae2 100644 --- a/sig/openai/models/conversations/message.rbs +++ b/sig/openai/models/conversations/message.rbs @@ -38,14 +38,14 @@ module OpenAI } type content = - OpenAI::Conversations::InputTextContent - | OpenAI::Conversations::OutputTextContent + OpenAI::Responses::ResponseInputText + | OpenAI::Responses::ResponseOutputText | OpenAI::Conversations::TextContent | OpenAI::Conversations::SummaryTextContent - | OpenAI::Conversations::RefusalContent - | OpenAI::Conversations::InputImageContent + | OpenAI::Responses::ResponseOutputRefusal + | OpenAI::Responses::ResponseInputImage | OpenAI::Conversations::ComputerScreenshotContent - | OpenAI::Conversations::InputFileContent + | OpenAI::Responses::ResponseInputFile module Content extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/conversations/output_text_content.rbs b/sig/openai/models/conversations/output_text_content.rbs index fbb50d7b..9a2a834e 100644 --- a/sig/openai/models/conversations/output_text_content.rbs +++ 
b/sig/openai/models/conversations/output_text_content.rbs @@ -1,52 +1,7 @@ module OpenAI module Models module Conversations - type output_text_content = - { - annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], - text: String, - type: :output_text, - logprobs: ::Array[OpenAI::Conversations::LobProb] - } - - class OutputTextContent < OpenAI::Internal::Type::BaseModel - attr_accessor annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation] - - attr_accessor text: String - - attr_accessor type: :output_text - - attr_reader logprobs: ::Array[OpenAI::Conversations::LobProb]? - - def logprobs=: ( - ::Array[OpenAI::Conversations::LobProb] - ) -> ::Array[OpenAI::Conversations::LobProb] - - def initialize: ( - annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], - text: String, - ?logprobs: ::Array[OpenAI::Conversations::LobProb], - ?type: :output_text - ) -> void - - def to_hash: -> { - annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], - text: String, - type: :output_text, - logprobs: ::Array[OpenAI::Conversations::LobProb] - } - - type annotation = - OpenAI::Conversations::FileCitationBody - | OpenAI::Conversations::URLCitationBody - | OpenAI::Conversations::ContainerFileCitationBody - - module Annotation - extend OpenAI::Internal::Type::Union - - def self?.variants: -> ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation] - end - end + class OutputTextContent = OpenAI::Models::Responses::ResponseOutputText end end end diff --git a/sig/openai/models/conversations/refusal_content.rbs b/sig/openai/models/conversations/refusal_content.rbs index 904922e1..c8affcdd 100644 --- a/sig/openai/models/conversations/refusal_content.rbs +++ b/sig/openai/models/conversations/refusal_content.rbs @@ -1,17 +1,7 @@ module OpenAI module Models module Conversations - type refusal_content = { refusal: String, type: :refusal } - - class RefusalContent < OpenAI::Internal::Type::BaseModel - attr_accessor refusal: String - - attr_accessor type: :refusal - - def initialize: (refusal: String, ?type: :refusal) -> void - - def to_hash: -> { refusal: String, type: :refusal } - end + class RefusalContent = OpenAI::Models::Responses::ResponseOutputRefusal end end end diff --git a/sig/openai/models/conversations/top_log_prob.rbs b/sig/openai/models/conversations/top_log_prob.rbs deleted file mode 100644 index 43425978..00000000 --- a/sig/openai/models/conversations/top_log_prob.rbs +++ /dev/null @@ -1,28 +0,0 @@ -module OpenAI - module Models - module Conversations - type top_log_prob = - { token: String, bytes: ::Array[Integer], logprob: Float } - - class TopLogProb < OpenAI::Internal::Type::BaseModel - attr_accessor token: String - - attr_accessor bytes: ::Array[Integer] - - attr_accessor logprob: Float - - def initialize: ( - token: String, - bytes: ::Array[Integer], - logprob: Float - ) -> void - - def to_hash: -> { - token: String, - bytes: ::Array[Integer], - logprob: Float - } - end - end - end -end diff --git a/sig/openai/models/conversations/url_citation_body.rbs b/sig/openai/models/conversations/url_citation_body.rbs deleted file mode 100644 index 70a4b20e..00000000 --- a/sig/openai/models/conversations/url_citation_body.rbs +++ /dev/null @@ -1,42 +0,0 @@ -module OpenAI - module Models - module Conversations - type url_citation_body = - { - end_index: Integer, - start_index: Integer, - title: String, - type: :url_citation, - url: String - } - - class URLCitationBody < 
OpenAI::Internal::Type::BaseModel - attr_accessor end_index: Integer - - attr_accessor start_index: Integer - - attr_accessor title: String - - attr_accessor type: :url_citation - - attr_accessor url: String - - def initialize: ( - end_index: Integer, - start_index: Integer, - title: String, - url: String, - ?type: :url_citation - ) -> void - - def to_hash: -> { - end_index: Integer, - start_index: Integer, - title: String, - type: :url_citation, - url: String - } - end - end - end -end diff --git a/sig/openai/models/evals/create_eval_completions_run_data_source.rbs b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs index 06402a15..555a9fb9 100644 --- a/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +++ b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs @@ -333,6 +333,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, seed: Integer, temperature: Float, @@ -345,6 +346,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format? def response_format=: ( @@ -371,6 +374,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, ?seed: Integer, ?temperature: Float, @@ -380,6 +384,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, seed: Integer, temperature: Float, diff --git a/sig/openai/models/evals/run_cancel_response.rbs b/sig/openai/models/evals/run_cancel_response.rbs index 6225703e..ca726d99 100644 --- a/sig/openai/models/evals/run_cancel_response.rbs +++ b/sig/openai/models/evals/run_cancel_response.rbs @@ -459,6 +459,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, @@ -471,6 +472,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader seed: Integer? 
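
Editorial note on the Conversations hunks above: the per-module content classes (`InputTextContent`, `InputImageContent`, `OutputTextContent`, `RefusalContent`, `InputFileContent`) are no longer standalone models but aliases of the shared Responses models, and the now-redundant `LobProb`, `TopLogProb`, and citation-body signatures are deleted. A minimal sketch of what the aliasing means at runtime, assuming the Ruby side mirrors these RBS aliases with a plain constant assignment:

```ruby
require "openai"

# Both constants resolve to the same class object, so existing code that
# references or type-checks against the Conversations name keeps working.
OpenAI::Models::Conversations::InputTextContent
  .equal?(OpenAI::Models::Responses::ResponseInputText) # => true

# Construction via either name yields the shared model (field names are
# taken from the signatures above).
content = OpenAI::Models::Conversations::InputTextContent.new(text: "Hello")
content.type                                                # => :input_text
content.is_a?(OpenAI::Models::Responses::ResponseInputText) # => true
```
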
def seed=: (Integer) -> Integer @@ -497,6 +500,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?seed: Integer, ?temperature: Float, ?text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, @@ -506,6 +510,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, diff --git a/sig/openai/models/evals/run_create_params.rbs b/sig/openai/models/evals/run_create_params.rbs index c5f744f9..73dbe141 100644 --- a/sig/openai/models/evals/run_create_params.rbs +++ b/sig/openai/models/evals/run_create_params.rbs @@ -422,6 +422,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, @@ -434,6 +435,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader seed: Integer? def seed=: (Integer) -> Integer @@ -460,6 +463,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?seed: Integer, ?temperature: Float, ?text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, @@ -469,6 +473,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, diff --git a/sig/openai/models/evals/run_create_response.rbs b/sig/openai/models/evals/run_create_response.rbs index 3377773c..dd5500e4 100644 --- a/sig/openai/models/evals/run_create_response.rbs +++ b/sig/openai/models/evals/run_create_response.rbs @@ -459,6 +459,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, @@ -471,6 +472,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader seed: Integer? 
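
The hunks above and below repeat a single change across the eval data sources: `SamplingParams` gains a nullable `reasoning_effort` backed by the shared `OpenAI::Models::reasoning_effort` enum. A hedged sketch of setting it when building run-creation params (the token and effort values are illustrative, and the `to_hash` output shown assumes only set fields are serialized):

```ruby
sampling = OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams.new(
  max_completion_tokens: 1_024,
  reasoning_effort: :low, # nullable; any OpenAI::Models::reasoning_effort value, or nil
  temperature: 0.2
)
sampling.to_hash
# => {max_completion_tokens: 1024, reasoning_effort: :low, temperature: 0.2}
```
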
def seed=: (Integer) -> Integer @@ -497,6 +500,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?seed: Integer, ?temperature: Float, ?text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, @@ -506,6 +510,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, diff --git a/sig/openai/models/evals/run_list_response.rbs b/sig/openai/models/evals/run_list_response.rbs index 0aaf9725..08cd067f 100644 --- a/sig/openai/models/evals/run_list_response.rbs +++ b/sig/openai/models/evals/run_list_response.rbs @@ -459,6 +459,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, @@ -471,6 +472,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader seed: Integer? def seed=: (Integer) -> Integer @@ -497,6 +500,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?seed: Integer, ?temperature: Float, ?text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, @@ -506,6 +510,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, diff --git a/sig/openai/models/evals/run_retrieve_response.rbs b/sig/openai/models/evals/run_retrieve_response.rbs index 0be266f0..1cb2ebee 100644 --- a/sig/openai/models/evals/run_retrieve_response.rbs +++ b/sig/openai/models/evals/run_retrieve_response.rbs @@ -459,6 +459,7 @@ module OpenAI type sampling_params = { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, @@ -471,6 +472,8 @@ module OpenAI def max_completion_tokens=: (Integer) -> Integer + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + attr_reader seed: Integer? 
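
The same nullable field lands on the run response shapes (create, cancel, list, retrieve), so it can be read back off a retrieved run. A hypothetical read path, assuming a configured `client`; the IDs and call shape are illustrative, and `data_source` is first narrowed to its Responses variant:

```ruby
run = client.evals.runs.retrieve("run_abc123", eval_id: "eval_abc123")

data_source = run.data_source
if data_source.is_a?(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses)
  # Nullable attr_accessor: reads back as nil when the API omitted it.
  effort = data_source.sampling_params&.reasoning_effort
end
```
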
def seed=: (Integer) -> Integer @@ -497,6 +500,7 @@ module OpenAI def initialize: ( ?max_completion_tokens: Integer, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, ?seed: Integer, ?temperature: Float, ?text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, @@ -506,6 +510,7 @@ module OpenAI def to_hash: -> { max_completion_tokens: Integer, + reasoning_effort: OpenAI::Models::reasoning_effort?, seed: Integer, temperature: Float, text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, diff --git a/sig/openai/models/evals/runs/output_item_list_response.rbs b/sig/openai/models/evals/runs/output_item_list_response.rbs index 53dfbc98..02e9bb4a 100644 --- a/sig/openai/models/evals/runs/output_item_list_response.rbs +++ b/sig/openai/models/evals/runs/output_item_list_response.rbs @@ -10,7 +10,7 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: :"eval.run.output_item", - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, status: String @@ -29,7 +29,7 @@ module OpenAI attr_accessor object: :"eval.run.output_item" - attr_accessor results: ::Array[::Hash[Symbol, top]] + attr_accessor results: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result] attr_accessor run_id: String @@ -43,7 +43,7 @@ module OpenAI datasource_item: ::Hash[Symbol, top], datasource_item_id: Integer, eval_id: String, - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, status: String, @@ -57,12 +57,51 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: :"eval.run.output_item", - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, status: String } + type result = + { + name: String, + passed: bool, + score: Float, + sample: ::Hash[Symbol, top]?, + type: String + } + + class Result < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_accessor passed: bool + + attr_accessor score: Float + + attr_accessor sample: ::Hash[Symbol, top]? + + attr_reader type: String? 
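
The output-item responses below replace the untyped `results` array (`::Array[::Hash[Symbol, top]]`) with a typed `Result` carrying `name`, `passed`, `score`, an optional raw `sample` hash, and an optional `type` string. A sketch of iterating grader results from the list endpoint; the client, IDs, and pagination call are illustrative:

```ruby
page = client.evals.runs.output_items.list("run_abc123", eval_id: "eval_abc123")

page.auto_paging_each do |item|
  item.results.each do |result|
    status = result.passed ? "pass" : "fail"
    puts format("%s: %s (score=%.2f)", result.name, status, result.score)
    # result.sample is a nullable raw hash; result.type may be absent.
  end
end
```
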
+ + def type=: (String) -> String + + def initialize: ( + name: String, + passed: bool, + score: Float, + ?sample: ::Hash[Symbol, top]?, + ?type: String + ) -> void + + def to_hash: -> { + name: String, + passed: bool, + score: Float, + sample: ::Hash[Symbol, top]?, + type: String + } + end + type sample = { error: OpenAI::Evals::EvalAPIError, diff --git a/sig/openai/models/evals/runs/output_item_retrieve_response.rbs b/sig/openai/models/evals/runs/output_item_retrieve_response.rbs index 70c37909..5794baef 100644 --- a/sig/openai/models/evals/runs/output_item_retrieve_response.rbs +++ b/sig/openai/models/evals/runs/output_item_retrieve_response.rbs @@ -10,7 +10,7 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: :"eval.run.output_item", - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, status: String @@ -29,7 +29,7 @@ module OpenAI attr_accessor object: :"eval.run.output_item" - attr_accessor results: ::Array[::Hash[Symbol, top]] + attr_accessor results: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result] attr_accessor run_id: String @@ -43,7 +43,7 @@ module OpenAI datasource_item: ::Hash[Symbol, top], datasource_item_id: Integer, eval_id: String, - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, status: String, @@ -57,12 +57,51 @@ module OpenAI datasource_item_id: Integer, eval_id: String, object: :"eval.run.output_item", - results: ::Array[::Hash[Symbol, top]], + results: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result], run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, status: String } + type result = + { + name: String, + passed: bool, + score: Float, + sample: ::Hash[Symbol, top]?, + type: String + } + + class Result < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_accessor passed: bool + + attr_accessor score: Float + + attr_accessor sample: ::Hash[Symbol, top]? + + attr_reader type: String? + + def type=: (String) -> String + + def initialize: ( + name: String, + passed: bool, + score: Float, + ?sample: ::Hash[Symbol, top]?, + ?type: String + ) -> void + + def to_hash: -> { + name: String, + passed: bool, + score: Float, + sample: ::Hash[Symbol, top]?, + type: String + } + end + type sample = { error: OpenAI::Evals::EvalAPIError, diff --git a/sig/openai/models/graders/score_model_grader.rbs b/sig/openai/models/graders/score_model_grader.rbs index 1b0cde32..d31e57ff 100644 --- a/sig/openai/models/graders/score_model_grader.rbs +++ b/sig/openai/models/graders/score_model_grader.rbs @@ -10,7 +10,7 @@ module OpenAI name: String, type: :score_model, range: ::Array[Float], - sampling_params: top + sampling_params: OpenAI::Graders::ScoreModelGrader::SamplingParams } class ScoreModelGrader < OpenAI::Internal::Type::BaseModel @@ -26,16 +26,18 @@ module OpenAI def range=: (::Array[Float]) -> ::Array[Float] - attr_reader sampling_params: top? + attr_reader sampling_params: OpenAI::Graders::ScoreModelGrader::SamplingParams? 
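
score_model graders likewise trade an untyped `top` for a dedicated `SamplingParams` model; note that this grader spells the token cap `max_completions_tokens` (plural), unlike the eval data-source params above. A minimal construction sketch with illustrative values:

```ruby
sampling = OpenAI::Graders::ScoreModelGrader::SamplingParams.new(
  max_completions_tokens: 512, # plural "completions" in this grader's schema
  reasoning_effort: :medium,
  seed: 7,
  temperature: 0.0,
  top_p: 1.0
)
sampling.to_hash
# => {max_completions_tokens: 512, reasoning_effort: :medium, seed: 7, temperature: 0.0, top_p: 1.0}
```
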
- def sampling_params=: (top) -> top + def sampling_params=: ( + OpenAI::Graders::ScoreModelGrader::SamplingParams + ) -> OpenAI::Graders::ScoreModelGrader::SamplingParams def initialize: ( input: ::Array[OpenAI::Graders::ScoreModelGrader::Input], model: String, name: String, ?range: ::Array[Float], - ?sampling_params: top, + ?sampling_params: OpenAI::Graders::ScoreModelGrader::SamplingParams, ?type: :score_model ) -> void @@ -45,7 +47,7 @@ module OpenAI name: String, type: :score_model, range: ::Array[Float], - sampling_params: top + sampling_params: OpenAI::Graders::ScoreModelGrader::SamplingParams } type input = @@ -154,6 +156,43 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::type_] end end + + type sampling_params = + { + max_completions_tokens: Integer?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + seed: Integer?, + temperature: Float?, + top_p: Float? + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_accessor max_completions_tokens: Integer? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor seed: Integer? + + attr_accessor temperature: Float? + + attr_accessor top_p: Float? + + def initialize: ( + ?max_completions_tokens: Integer?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?seed: Integer?, + ?temperature: Float?, + ?top_p: Float? + ) -> void + + def to_hash: -> { + max_completions_tokens: Integer?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + seed: Integer?, + temperature: Float?, + top_p: Float? + } + end end end end diff --git a/test/openai/resources/evals/runs/output_items_test.rb b/test/openai/resources/evals/runs/output_items_test.rb index 344b8e1c..00d4bd69 100644 --- a/test/openai/resources/evals/runs/output_items_test.rb +++ b/test/openai/resources/evals/runs/output_items_test.rb @@ -19,7 +19,7 @@ def test_retrieve_required_params datasource_item_id: Integer, eval_id: String, object: Symbol, - results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]), + results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result]), run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, status: String @@ -49,7 +49,7 @@ def test_list_required_params datasource_item_id: Integer, eval_id: String, object: Symbol, - results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]), + results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Result]), run_id: String, sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, status: String
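
The test updates above are mechanical: assertions that previously matched `ArrayOf[HashOf[Unknown]]` now match `ArrayOf[...Result]`. For completeness, a hedged sketch of asserting an individual result row in the same pattern-matching style the suite uses; the `result` binding is illustrative and `assert_pattern` is assumed to be the suite's existing helper:

```ruby
assert_pattern do
  result => {
    name: String,
    passed: true | false,
    score: Float
  }
end
```
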