diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index de0c9185..6992080f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,7 +12,7 @@ jobs:
   lint:
     name: lint
     runs-on: ubuntu-latest
-    if: github.repository == 'openai/openai-ruby'
+
 
     steps:
       - uses: actions/checkout@v4
@@ -29,7 +29,6 @@ jobs:
   test:
     name: test
     runs-on: ubuntu-latest
-    if: github.repository == 'openai/openai-ruby'
 
     steps:
      - uses: actions/checkout@v4
diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml
deleted file mode 100644
index ba6cb5ea..00000000
--- a/.github/workflows/create-releases.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: Create releases
-on:
-  schedule:
-    - cron: '0 5 * * *' # every day at 5am UTC
-  push:
-    branches:
-      - main
-
-jobs:
-  release:
-    name: release
-    if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-ruby'
-    runs-on: ubuntu-latest
-    environment: publish
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: stainless-api/trigger-release-please@v1
-        id: release
-        with:
-          repo: ${{ github.event.repository.full_name }}
-          stainless-api-key: ${{ secrets.STAINLESS_API_KEY }}
-
-      - name: Update RubyDocs
-        if: ${{ steps.release.outputs.releases_created }}
-        run: |
-          curl -i -H "Content-Type: application/json" -X POST -d '{"repository":{"url":"https://github.com/openai/openai-ruby"}}' https://www.rubydoc.info/checkout
diff --git a/.github/workflows/publish-gem.yml b/.github/workflows/publish-gem.yml
index 48dbf6e6..d6ba1c4a 100644
--- a/.github/workflows/publish-gem.yml
+++ b/.github/workflows/publish-gem.yml
@@ -1,9 +1,13 @@
-# workflow for re-running publishing to rubygems.org in case it fails for some reason
-# you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-gem.yml
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to rubygems.org in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/openai/openai-ruby/actions/workflows/publish-gem.yml
 name: Publish Gem
 on:
   workflow_dispatch:
 
+  release:
+    types: [published]
+
 jobs:
   publish:
     name: publish
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index fc3ec131..1659237f 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -19,6 +19,5 @@ jobs:
         run: |
           bash ./bin/check-release-environment
         env:
-          STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }}
           RUBYGEMS_HOST: ${{ secrets.OPENAI_RUBYGEMS_HOST || secrets.RUBYGEMS_HOST }}
           GEM_HOST_API_KEY: ${{ secrets.OPENAI_GEM_HOST_API_KEY || secrets.GEM_HOST_API_KEY }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ba6c3483..f14b480a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.1"
+  ".": "0.1.0-alpha.2"
 }
\ No newline at end of file
diff --git a/.rubocop.yml b/.rubocop.yml
index 17e6abbd..e0a360b6 100644
--- a/.rubocop.yml
+++ b/.rubocop.yml
@@ -58,6 +58,11 @@ Layout/MultilineMethodParameterLineBreaks:
 Layout/SpaceInsideHashLiteralBraces:
   EnforcedStyle: no_space
 
+# This option occasionally mangles identifier names
+Lint/DeprecatedConstants:
+  Exclude:
+    - "**/*.rbi"
+
 # Fairly useful in tests for pattern assertions.
 Lint/EmptyInPattern:
   Exclude:
diff --git a/.stats.yml b/.stats.yml
index f18d6148..26b57a65 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 80
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
diff --git a/.yardopts b/.yardopts
index 29c933bc..c7c3301d 100644
--- a/.yardopts
+++ b/.yardopts
@@ -1 +1,3 @@
 --markup markdown
+--exclude /rbi
+--exclude /sig
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 31ecea67..d0a2be31 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,34 @@
 # Changelog
 
+## 0.1.0-alpha.2 (2025-03-18)
+
+Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/openai/openai-ruby/compare/v0.1.0-alpha.1...v0.1.0-alpha.2)
+
+### Features
+
+* support jsonl uploads ([#10](https://github.com/openai/openai-ruby/issues/10)) ([b3b9e40](https://github.com/openai/openai-ruby/commit/b3b9e406f0174423a12e0e7e26f8f5c469b13f7e))
+
+
+### Bug Fixes
+
+* enums should not unnecessarily convert non-members to symbol type ([#23](https://github.com/openai/openai-ruby/issues/23)) ([05294a7](https://github.com/openai/openai-ruby/commit/05294a761c6e0ed3819c9cb4d2cd11f52134cbd6))
+
+
+### Chores
+
+* add most doc strings to rbi type definitions ([#12](https://github.com/openai/openai-ruby/issues/12)) ([f711649](https://github.com/openai/openai-ruby/commit/f711649c42200d70f8545d2014e8398297e62691))
+* do not label modules as abstract ([#22](https://github.com/openai/openai-ruby/issues/22)) ([bad4ec9](https://github.com/openai/openai-ruby/commit/bad4ec9ecda97c2eb4c4e9d5fabc62e2a7ab5bf2))
+* document union variants in yard doc ([#16](https://github.com/openai/openai-ruby/issues/16)) ([3ffacfe](https://github.com/openai/openai-ruby/commit/3ffacfe591bbb909a59e9581ea37eceaee07f9f0))
+* ensure doc strings for rbi method arguments ([#13](https://github.com/openai/openai-ruby/issues/13)) ([2c59996](https://github.com/openai/openai-ruby/commit/2c599969eac0c7ffd167f524604cc0e2ebae280c))
+* error fields are now mutable in keeping with rest of SDK ([#15](https://github.com/openai/openai-ruby/issues/15)) ([0e30eb7](https://github.com/openai/openai-ruby/commit/0e30eb76a81ca76a62a89b734d5333fe4d59154f))
+* **internal:** remove CI condition ([#18](https://github.com/openai/openai-ruby/issues/18)) ([db07e59](https://github.com/openai/openai-ruby/commit/db07e59ffce1577ac42d74ff57ecd18838012fcb))
+* mark non-inheritable SDK internal classes as final ([#19](https://github.com/openai/openai-ruby/issues/19)) ([ed33b6b](https://github.com/openai/openai-ruby/commit/ed33b6bbe8ab4c95c88255f4cc68c34276b8662d))
+* sdk client internal refactoring ([#21](https://github.com/openai/openai-ruby/issues/21)) ([927e252](https://github.com/openai/openai-ruby/commit/927e2521fedced96a8429214de8145dc4b5521a3))
+* sdk internal updates ([#9](https://github.com/openai/openai-ruby/issues/9)) ([7673bb0](https://github.com/openai/openai-ruby/commit/7673bb0eb0ed542df89e5bc5dbf0247b26a97617))
+* slightly more consistent type definition layout ([#17](https://github.com/openai/openai-ruby/issues/17)) ([1e4b557](https://github.com/openai/openai-ruby/commit/1e4b557e0ab1c8c6483c5e2c8dd856d5a9a2da90))
+* touch up sdk usage examples ([#14](https://github.com/openai/openai-ruby/issues/14)) ([7219d46](https://github.com/openai/openai-ruby/commit/7219d463ba66a6499a84131f36b06d06de35a5ec))
+* use generics instead of overloading for sorbet type definitions ([#20](https://github.com/openai/openai-ruby/issues/20)) ([a279382](https://github.com/openai/openai-ruby/commit/a279382762e14b18c6573f34bc3e825f927caaab))
+
 ## 0.1.0-alpha.1 (2025-03-13)
 
 Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/openai/openai-ruby/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
diff --git a/Gemfile.lock b/Gemfile.lock
index 341cc850..333b95fb 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -11,7 +11,7 @@ GIT
 PATH
   remote: .
   specs:
-    openai (0.1.0.pre.alpha.1)
+    openai (0.1.0.pre.alpha.2)
       connection_pool
 
 GEM
diff --git a/README.md b/README.md
index c5e359bc..8b458397 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@ end
 We provide support for streaming responses using Server-Sent Events (SSE).
 
 ```ruby
-stream = openai.chat_completions_create_streaming(
+stream = openai.chat.completions.create_streaming(
   messages: [{
     role: "user",
     content: "Say this is a test"
diff --git a/bin/check-release-environment b/bin/check-release-environment
index 6aa95c4f..6303e291 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -2,10 +2,6 @@
 
 errors=()
 
-if [ -z "${STAINLESS_API_KEY}" ]; then
-  errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.")
-fi
-
 if [ -z "${GEM_HOST_API_KEY}" ]; then
   errors+=("The OPENAI_GEM_HOST_API_KEY secret has not been set. Please set it in either this repository's secrets or your organization secrets")
 fi
diff --git a/lib/openai/base_client.rb b/lib/openai/base_client.rb
index 9707f135..b5cc5490 100644
--- a/lib/openai/base_client.rb
+++ b/lib/openai/base_client.rb
@@ -1,10 +1,9 @@
 # frozen_string_literal: true
 
 module OpenAI
-  # @private
+  # @api private
   #
   # @abstract
-  #
   class BaseClient
     # from whatwg fetch spec
     MAX_REDIRECTS = 20
@@ -21,12 +20,11 @@ class BaseClient
     # rubocop:enable Style/MutableConstant
 
     class << self
-      # @private
+      # @api private
       #
      # @param req [Hash{Symbol=>Object}]
       #
       # @raise [ArgumentError]
-      #
       def validate!(req)
         keys = [:method, :path, :query, :headers, :body, :unwrap, :page, :stream, :model, :options]
         case req
@@ -41,13 +39,12 @@ def validate!(req)
         end
       end
 
-      # @private
+      # @api private
       #
       # @param status [Integer]
       # @param headers [Hash{String=>String}, Net::HTTPHeader]
       #
       # @return [Boolean]
-      #
       def should_retry?(status, headers:)
         coerced = OpenAI::Util.coerce_boolean(headers["x-should-retry"])
         case [coerced, status]
@@ -65,7 +62,7 @@ def should_retry?(status, headers:)
         end
       end
 
-      # @private
+      # @api private
       #
       # @param request [Hash{Symbol=>Object}] .
       #
@@ -86,7 +83,6 @@ def should_retry?(status, headers:)
       # @param response_headers [Hash{String=>String}, Net::HTTPHeader]
       #
       # @return [Hash{Symbol=>Object}]
-      #
       def follow_redirect(request, status:, response_headers:)
         method, url, headers = request.fetch_values(:method, :url, :headers)
         location =
@@ -128,14 +124,27 @@ def follow_redirect(request, status:, response_headers:)
 
         request
       end
+
+      # @api private
+      #
+      # @param status [Integer, OpenAI::APIConnectionError]
+      # @param stream [Enumerable, nil]
+      def reap_connection!(status, stream:)
+        case status
+        in (..199) | (300..499)
+          stream&.each { next }
+        in OpenAI::APIConnectionError | (500..)
+          OpenAI::Util.close_fused!(stream)
+        else
+        end
+      end
     end
 
-    # @private
-    #
+    # @api private
     # @return [OpenAI::PooledNetRequester]
    attr_accessor :requester
 
-    # @private
+    # @api private
     #
     # @param base_url [String]
     # @param timeout [Float]
@@ -144,7 +153,6 @@ def initialize(
     # @param max_retry_delay [Float]
     # @param headers [Hash{String=>String, Integer, Array, nil}]
     # @param idempotency_header [String, nil]
-    #
     def initialize(
       base_url:,
       timeout: 0.0,
@@ -171,19 +179,17 @@ def initialize(
       @max_retry_delay = max_retry_delay
     end
 
-    # @private
+    # @api private
     #
     # @return [Hash{String=>String}]
-    #
     private def auth_headers = {}
 
-    # @private
+    # @api private
     #
     # @return [String]
-    #
     private def generate_idempotency_key = "stainless-ruby-retry-#{SecureRandom.uuid}"
 
-    # @private
+    # @api private
     #
     # @param req [Hash{Symbol=>Object}] .
@@ -220,7 +226,6 @@ def initialize(
     # @option opts [Float, nil] :timeout
     #
     # @return [Hash{Symbol=>Object}]
-    #
     private def build_request(req, opts)
       method, uninterpolated_path = req.fetch_values(:method, :path)
 
@@ -271,13 +276,12 @@ def initialize(
       }
     end
 
-    # @private
+    # @api private
     #
     # @param headers [Hash{String=>String}]
     # @param retry_count [Integer]
     #
     # @return [Float]
-    #
     private def retry_delay(headers, retry_count:)
       # Non-standard extension
       span = Float(headers["retry-after-ms"], exception: false)&.then { _1 / 1000 }
@@ -298,7 +302,7 @@ def initialize(
       (@initial_retry_delay * scale * jitter).clamp(0, @max_retry_delay)
     end
 
-    # @private
+    # @api private
     #
     # @param request [Hash{Symbol=>Object}] .
     #
@@ -322,7 +326,6 @@ def initialize(
     #
     # @raise [OpenAI::APIError]
     # @return [Array(Integer, Net::HTTPResponse, Enumerable)]
-    #
     private def send_request(request, redirect_count:, retry_count:, send_retry_header:)
       url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout)
       input = {**request.except(:timeout), deadline: OpenAI::Util.monotonic_secs + timeout}
@@ -332,28 +335,23 @@ def initialize(
       end
 
       begin
-        response, stream = @requester.execute(input)
-        status = Integer(response.code)
+        status, response, stream = @requester.execute(input)
       rescue OpenAI::APIConnectionError => e
         status = e
       end
 
-      # normally we want to drain the response body and reuse the HTTP session by clearing the socket buffers
-      # unless we hit a server error
-      srv_fault = (500...).include?(status)
-
       case status
       in ..299
         [status, response, stream]
       in 300..399 if redirect_count >= self.class::MAX_REDIRECTS
-        message = "Failed to complete the request within #{self.class::MAX_REDIRECTS} redirects."
+        self.class.reap_connection!(status, stream: stream)
 
-        stream.each { next }
+        message = "Failed to complete the request within #{self.class::MAX_REDIRECTS} redirects."
         raise OpenAI::APIConnectionError.new(url: url, message: message)
       in 300..399
-        request = self.class.follow_redirect(request, status: status, response_headers: response)
+        self.class.reap_connection!(status, stream: stream)
 
-        stream.each { next }
+        request = self.class.follow_redirect(request, status: status, response_headers: response)
         send_request(
           request,
           redirect_count: redirect_count + 1,
@@ -363,12 +361,10 @@ def initialize(
       in OpenAI::APIConnectionError if retry_count >= max_retries
         raise status
       in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: response)
-        decoded = OpenAI::Util.decode_content(response, stream: stream, suppress_error: true)
-
-        if srv_fault
-          OpenAI::Util.close_fused!(stream)
-        else
-          stream.each { next }
+        decoded = Kernel.then do
+          OpenAI::Util.decode_content(response, stream: stream, suppress_error: true)
+        ensure
+          self.class.reap_connection!(status, stream: stream)
         end
 
         raise OpenAI::APIStatusError.for(
@@ -379,13 +375,9 @@ def initialize(
           response: response
         )
       in (400..) | OpenAI::APIConnectionError
-        delay = retry_delay(response, retry_count: retry_count)
+        self.class.reap_connection!(status, stream: stream)
 
-        if srv_fault
-          OpenAI::Util.close_fused!(stream)
-        else
-          stream&.each { next }
-        end
+        delay = retry_delay(response, retry_count: retry_count)
 
         sleep(delay)
         send_request(
@@ -424,7 +416,6 @@ def initialize(
     #
     # @raise [OpenAI::APIError]
     # @return [Object]
-    #
     def request(req)
       self.class.validate!(req)
       model = req.fetch(:model) { OpenAI::Unknown }
@@ -455,7 +446,6 @@ def request(req)
     end
 
     # @return [String]
-    #
     def inspect
       # rubocop:disable Layout/LineLength
       base_url = OpenAI::Util.unparse_uri(@base_url)
diff --git a/lib/openai/base_model.rb b/lib/openai/base_model.rb
index 798e49ca..4ef0106e 100644
--- a/lib/openai/base_model.rb
+++ b/lib/openai/base_model.rb
@@ -1,41 +1,35 @@
 # frozen_string_literal: true
 
 module OpenAI
-  # @private
-  #
-  # @abstract
-  #
+  # @api private
   module Converter
     # rubocop:disable Lint/UnusedMethodArgument
 
-    # @private
+    # @api private
     #
     # @param value [Object]
     #
     # @return [Object]
-    #
     def coerce(value) = value
 
-    # @private
+    # @api private
     #
     # @param value [Object]
     #
     # @return [Object]
-    #
     def dump(value) = value
 
-    # @private
+    # @api private
     #
     # @param value [Object]
     #
     # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-    #
     def try_strict_coerce(value) = (raise NotImplementedError)
 
     # rubocop:enable Lint/UnusedMethodArgument
 
     class << self
-      # @private
+      # @api private
      #
       # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] .
       #
@@ -48,7 +42,6 @@ class << self
       # @option spec [Boolean] :"nil?"
       #
       # @return [Proc]
-      #
       def type_info(spec)
         case spec
         in Hash
@@ -64,7 +57,7 @@ def type_info(spec)
         end
       end
 
-      # @private
+      # @api private
       #
       # Based on `target`, transform `value` into `target`, to the extent possible:
       #
@@ -77,7 +70,6 @@ def type_info(spec)
       # @param value [Object]
       #
       # @return [Object]
-      #
       def coerce(target, value)
         case target
         in OpenAI::Converter
@@ -111,13 +103,12 @@ def coerce(target, value)
         end
       end
 
-      # @private
+      # @api private
       #
       # @param target [OpenAI::Converter, Class]
       # @param value [Object]
       #
       # @return [Object]
-      #
       def dump(target, value)
         case target
         in OpenAI::Converter
@@ -127,7 +118,7 @@ def dump(target, value)
         end
       end
 
-      # @private
+      # @api private
       #
       # The underlying algorithm for computing maximal compatibility is subject to
       # future improvements.
@@ -142,7 +133,6 @@ def dump(target, value)
       # @param value [Object]
       #
       # @return [Object]
-      #
       def try_strict_coerce(target, value)
         case target
         in OpenAI::Converter
@@ -182,7 +172,7 @@ def try_strict_coerce(target, value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @abstract
     #
@@ -197,40 +187,35 @@ class Unknown
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.===(other) = true
 
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.==(other) = other.is_a?(Class) && other <= OpenAI::Unknown
 
     class << self
       # @!parse
-      #   # @private
+      #   # @api private
       #   #
       #   # @param value [Object]
       #   #
       #   # @return [Object]
-      #   #
       #   def coerce(value) = super
 
       # @!parse
-      #   # @private
+      #   # @api private
       #   #
       #   # @param value [Object]
       #   #
       #   # @return [Object]
-      #   #
       #   def dump(value) = super
 
-      # @private
+      # @api private
      #
       # @param value [Object]
       #
       # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-      #
       def try_strict_coerce(value)
         # prevent unknown variant from being chosen during the first coercion pass
         [false, true, 0]
@@ -240,7 +225,7 @@ def try_strict_coerce(value)
     # rubocop:enable Lint/UnusedMethodArgument
   end
 
-  # @private
+  # @api private
   #
   # @abstract
   #
@@ -253,40 +238,35 @@ class BooleanModel
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.===(other) = other == true || other == false
 
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.==(other) = other.is_a?(Class) && other <= OpenAI::BooleanModel
 
     class << self
       # @!parse
-      #   # @private
+      #   # @api private
       #   #
       #   # @param value [Boolean, Object]
       #   #
       #   # @return [Boolean, Object]
-      #   #
       #   def coerce(value) = super
 
       # @!parse
-      #   # @private
+      #   # @api private
       #   #
       #   # @param value [Boolean, Object]
       #   #
       #   # @return [Boolean, Object]
-      #   #
       #   def dump(value) = super
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-      #
       def try_strict_coerce(value)
         case value
         in true | false
@@ -298,7 +278,7 @@ def try_strict_coerce(value)
       end
     end
 
-  # @private
+  # @api private
   #
   # @abstract
   #
@@ -324,7 +304,7 @@ def try_strict_coerce(value)
   #   when OpenAI::Models::ChatModel::O1
   #     # ...
   #   else
-  #     # ...
+  #     puts(chat_model)
   #   end
   # ```
   #
@@ -338,7 +318,7 @@ def try_strict_coerce(value)
   #   in :o1
   #     # ...
   #   else
-  #     # ...
+  #     puts(chat_model)
   #   end
   # ```
   class Enum
@@ -348,13 +328,11 @@ class << self
       # All of the valid Symbol values for this enum.
       #
       # @return [Array]
-      #
       def values = (@values ||= constants.map { const_get(_1) })
 
-      # @private
+      # @api private
       #
       # Guard against thread safety issues by instantiating `@values`.
-      #
      private def finalize! = values
     end
 
@@ -363,24 +341,21 @@ def values = (@values ||= constants.map { const_get(_1) })
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.===(other) = values.include?(other)
 
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.==(other)
       other.is_a?(Class) && other <= OpenAI::Enum && other.values.to_set == values.to_set
     end
 
     class << self
-      # @private
+      # @api private
       #
       # @param value [String, Symbol, Object]
       #
       # @return [Symbol, Object]
-      #
       def coerce(value)
         case value
         in Symbol | String if values.include?(val = value.to_sym)
@@ -391,20 +366,18 @@ def coerce(value)
       end
 
       # @!parse
-      #   # @private
+      #   # @api private
       #   #
       #   # @param value [Symbol, Object]
       #   #
       #   # @return [Symbol, Object]
-      #   #
       #   def dump(value) = super
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-      #
       def try_strict_coerce(value)
         return [true, value, 1] if values.include?(value)
 
@@ -423,7 +396,7 @@ def try_strict_coerce(value)
       end
     end
 
-  # @private
+  # @api private
   #
   # @abstract
   #
@@ -432,13 +405,13 @@ def try_strict_coerce(value)
   #   # `chat_completion_content_part` is a `OpenAI::Models::Chat::ChatCompletionContentPart`
   #   case chat_completion_content_part
   #   when OpenAI::Models::Chat::ChatCompletionContentPartText
-  #     # ...
+  #     puts(chat_completion_content_part.text)
   #   when OpenAI::Models::Chat::ChatCompletionContentPartImage
-  #     # ...
+  #     puts(chat_completion_content_part.image_url)
   #   when OpenAI::Models::Chat::ChatCompletionContentPartInputAudio
-  #     # ...
+  #     puts(chat_completion_content_part.input_audio)
   #   else
-  #     # ...
+  #     puts(chat_completion_content_part)
   #   end
   # ```
   #
@@ -446,43 +419,43 @@ def try_strict_coerce(value)
   # ```ruby
   #   case chat_completion_content_part
   #   in {type: :text, text: text}
-  #     # ...
+  #     puts(text)
   #   in {type: :image_url, image_url: image_url}
-  #     # ...
+  #     puts(image_url)
   #   in {type: :input_audio, input_audio: input_audio}
-  #     # ...
-  #   in {type: :file, file: file}
-  #     # ...
+  #     puts(input_audio)
   #   else
-  #     # ...
+  #     puts(chat_completion_content_part)
   #   end
   # ```
   class Union
     extend OpenAI::Converter
 
     class << self
-      # @private
+      # @api private
       #
       # All of the specified variant info for this union.
       #
       # @return [Array]
-      #
       private def known_variants = (@known_variants ||= [])
 
-      # @private
-      #
-      # All of the specified variants for this union.
+      # @api private
       #
       # @return [Array]
-      #
-      protected def variants
+      protected def derefed_variants
         @known_variants.map { |key, variant_fn| [key, variant_fn.call] }
       end
 
-      # @private
+      # All of the specified variants for this union.
       #
-      # @param property [Symbol]
+      # @return [Array]
+      def variants
+        derefed_variants.map(&:last)
+      end
+
+      # @api private
       #
+      # @param property [Symbol]
      private def discriminator(property)
         case property
         in Symbol
@@ -490,7 +463,7 @@ class << self
 
-      # @private
+      # @api private
       #
       # @param key [Symbol, Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class]
       #
@@ -503,7 +476,6 @@ class << self
       # @option spec [Proc] :union
       #
       # @option spec [Boolean] :"nil?"
-      #
       private def variant(key, spec = nil)
         variant_info =
           case key
@@ -516,12 +488,11 @@ class << self
 
         known_variants << variant_info
       end
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [OpenAI::Converter, Class, nil]
-      #
       private def resolve_variant(value)
         case [@discriminator, value]
         in [_, OpenAI::BaseModel]
@@ -551,7 +522,6 @@ class << self
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.===(other)
       known_variants.any?
 do |_, variant_fn|
         variant_fn.call === other
@@ -561,18 +531,16 @@ def self.===(other)
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def self.==(other)
-      other.is_a?(Class) && other <= OpenAI::Union && other.variants == variants
+      other.is_a?(Class) && other <= OpenAI::Union && other.derefed_variants == derefed_variants
     end
 
     class << self
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Object]
-      #
       def coerce(value)
         if (variant = resolve_variant(value))
           return OpenAI::Converter.coerce(variant, value)
@@ -597,12 +565,11 @@ def coerce(value)
 
         variant.nil? ? value : OpenAI::Converter.coerce(variant, value)
       end
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Object]
-      #
       def dump(value)
         if (variant = resolve_variant(value))
           return OpenAI::Converter.dump(variant, value)
@@ -617,12 +584,11 @@ def dump(value)
 
         value
       end
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-      #
       def try_strict_coerce(value)
         # TODO(ruby) this will result in super linear decoding behaviour for nested unions
         # follow up with a decoding context that captures current strictness levels
@@ -655,7 +621,7 @@ def try_strict_coerce(value)
     # rubocop:enable Style/HashEachMethods
   end
 
-  # @private
+  # @api private
   #
   # @abstract
   #
@@ -670,7 +636,6 @@ def self.[](...) = new(...)
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def ===(other)
       type = item_type
       case other
@@ -686,15 +651,13 @@ def ===(other)
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def ==(other) = other.is_a?(OpenAI::ArrayOf) && other.item_type == item_type
 
-    # @private
+    # @api private
     #
     # @param value [Enumerable, Object]
     #
     # @return [Array, Object]
-    #
     def coerce(value)
       type = item_type
       case value
@@ -705,12 +668,11 @@ def coerce(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @param value [Enumerable, Object]
     #
     # @return [Array, Object]
-    #
     def dump(value)
       type = item_type
       case value
@@ -721,12 +683,11 @@ def dump(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @param value [Object]
     #
     # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-    #
     def try_strict_coerce(value)
       case value
       in Array
@@ -760,13 +721,12 @@ def try_strict_coerce(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @return [OpenAI::Converter, Class]
-    #
     protected def item_type = @item_type_fn.call
 
-    # @private
+    # @api private
     #
     # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class]
     #
@@ -779,13 +739,12 @@ def try_strict_coerce(value)
     # @option spec [Proc] :union
     #
     # @option spec [Boolean] :"nil?"
-    #
     def initialize(type_info, spec = {})
       @item_type_fn = OpenAI::Converter.type_info(type_info || spec)
     end
   end
 
-  # @private
+  # @api private
   #
   # @abstract
   #
@@ -800,7 +759,6 @@ def self.[](...) = new(...)
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def ===(other)
       type = item_type
      case other
@@ -821,15 +779,13 @@ def ===(other)
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def ==(other) = other.is_a?(OpenAI::HashOf) && other.item_type == item_type
 
-    # @private
+    # @api private
     #
     # @param value [Hash{Object=>Object}, Object]
     #
     # @return [Hash{Symbol=>Object}, Object]
-    #
     def coerce(value)
       type = item_type
       case value
@@ -843,12 +799,11 @@ def coerce(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @param value [Hash{Object=>Object}, Object]
     #
     # @return [Hash{Symbol=>Object}, Object]
-    #
     def dump(value)
       type = item_type
       case value
@@ -861,12 +816,11 @@ def dump(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @param value [Object]
     #
     # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-    #
     def try_strict_coerce(value)
       case value
       in Hash
@@ -900,13 +854,12 @@ def try_strict_coerce(value)
       end
     end
 
-    # @private
+    # @api private
     #
     # @return [OpenAI::Converter, Class]
-    #
     protected def item_type = @item_type_fn.call
 
-    # @private
+    # @api private
     #
     # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class]
     #
@@ -919,14 +872,11 @@ def try_strict_coerce(value)
     # @option spec [Proc] :union
     #
     # @option spec [Boolean] :"nil?"
-    #
     def initialize(type_info, spec = {})
       @item_type_fn = OpenAI::Converter.type_info(type_info || spec)
     end
   end
 
-  # @private
-  #
   # @abstract
   #
   # @example
@@ -942,32 +892,31 @@
   class BaseModel
     extend OpenAI::Converter
 
     class << self
-      # @private
+      # @api private
       #
       # Assumes superclass fields are totally defined before fields are accessed /
       # defined on subclasses.
       #
       # @return [Hash{Symbol=>Hash{Symbol=>Object}}]
-      #
       def known_fields
         @known_fields ||= (self < OpenAI::BaseModel ? superclass.known_fields.dup : {})
       end
 
-      # @return [Hash{Symbol=>Hash{Symbol=>Object}}]
+      # @api private
       #
+      # @return [Hash{Symbol=>Hash{Symbol=>Object}}]
      def fields
         known_fields.transform_values do |field|
           {**field.except(:type_fn), type: field.fetch(:type_fn).call}
         end
       end
 
-      # @private
+      # @api private
       #
       # @return [Hash{Symbol=>Proc}]
-      #
       def defaults = (@defaults ||= {})
 
-      # @private
+      # @api private
       #
       # @param name_sym [Symbol]
       #
@@ -984,7 +933,6 @@ def defaults = (@defaults ||= {})
       # @option spec [Proc] :union
       #
       # @option spec [Boolean] :"nil?"
-      #
       private def add_field(name_sym, required:, type_info:, spec:)
         type_fn, info =
           case type_info
@@ -1023,7 +971,7 @@ def defaults = (@defaults ||= {})
         end
       end
 
-      # @private
+      # @api private
       #
       # @param name_sym [Symbol]
       #
@@ -1038,12 +986,11 @@ def defaults = (@defaults ||= {})
       # @option spec [Proc] :union
       #
       # @option spec [Boolean] :"nil?"
-      #
       def required(name_sym, type_info, spec = {})
         add_field(name_sym, required: true, type_info: type_info, spec: spec)
       end
 
-      # @private
+      # @api private
       #
       # @param name_sym [Symbol]
      #
@@ -1058,18 +1005,16 @@ def required(name_sym, type_info, spec = {})
       # @option spec [Proc] :union
       #
       # @option spec [Boolean] :"nil?"
-      #
       def optional(name_sym, type_info, spec = {})
         add_field(name_sym, required: false, type_info: type_info, spec: spec)
       end
 
-      # @private
+      # @api private
       #
       # `request_only` attributes not excluded from `.#coerce` when receiving responses
       # even if well behaved servers should not send them
       #
       # @param blk [Proc]
-      #
      private def request_only(&blk)
         @mode = :dump
         blk.call
@@ -1077,12 +1022,11 @@ def optional(name_sym, type_info, spec = {})
         @mode = nil
       end
 
-      # @private
+      # @api private
       #
       # `response_only` attributes are omitted from `.#dump` when making requests
       #
       # @param blk [Proc]
-      #
       private def response_only(&blk)
         @mode = :coerce
         blk.call
@@ -1094,7 +1038,6 @@ def optional(name_sym, type_info, spec = {})
     # @param other [Object]
     #
     # @return [Boolean]
-    #
     def ==(other)
       case other
       in OpenAI::BaseModel
@@ -1105,12 +1048,11 @@ def ==(other)
       end
     end
 
     class << self
-      # @private
+      # @api private
       #
       # @param value [OpenAI::BaseModel, Hash{Object=>Object}, Object]
       #
       # @return [OpenAI::BaseModel, Object]
-      #
       def coerce(value)
         case OpenAI::Util.coerce_hash(value)
         in Hash => coerced
@@ -1120,12 +1062,11 @@ def coerce(value)
         end
       end
 
-      # @private
+      # @api private
       #
       # @param value [OpenAI::BaseModel, Object]
       #
       # @return [Hash{Object=>Object}, Object]
-      #
       def dump(value)
         unless (coerced = OpenAI::Util.coerce_hash(value)).is_a?(Hash)
           return value
@@ -1157,12 +1098,11 @@ def dump(value)
 
         values
       end
 
-      # @private
+      # @api private
       #
       # @param value [Object]
       #
       # @return [Array(true, Object, nil), Array(false, Boolean, Integer)]
-      #
       def try_strict_coerce(value)
         case value
         in Hash | OpenAI::BaseModel
@@ -1220,7 +1160,6 @@ def try_strict_coerce(value)
     # @param key [Symbol]
     #
     # @return [Object, nil]
-    #
     def [](key)
       unless key.instance_of?(Symbol)
         raise ArgumentError.new("Expected symbol key for lookup, got #{key.inspect}")
@@ -1239,7 +1178,6 @@ def [](key)
     # should not be mutated.
     #
     # @return [Hash{Symbol=>Object}]
-    #
     def to_h = @data
 
     alias_method :to_hash, :to_h
@@ -1247,7 +1185,6 @@ def to_h = @data
     # @param keys [Array, nil]
     #
     # @return [Hash{Symbol=>Object}]
-    #
     def deconstruct_keys(keys)
       (keys || self.class.known_fields.keys).filter_map do |k|
         unless self.class.known_fields.key?(k)
@@ -1262,7 +1199,6 @@ def deconstruct_keys(keys)
     # Create a new instance of a model.
     #
     # @param data [Hash{Symbol=>Object}, OpenAI::BaseModel]
-    #
     def initialize(data = {})
       case OpenAI::Util.coerce_hash(data)
       in Hash => coerced
@@ -1273,11 +1209,9 @@ def initialize(data = {})
     end
 
     # @return [String]
-    #
     def to_s = @data.to_s
 
     # @return [String]
-    #
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} #{deconstruct_keys(nil).map do |k, v|
         "#{k}=#{v.inspect}"
diff --git a/lib/openai/base_page.rb b/lib/openai/base_page.rb
index 9f315c7b..c8a9058a 100644
--- a/lib/openai/base_page.rb
+++ b/lib/openai/base_page.rb
@@ -1,10 +1,6 @@
 # frozen_string_literal: true
 
 module OpenAI
-  # @private
-  #
-  # @abstract
-  #
   # @example
   # ```ruby
   #   if page.has_next?
@@ -21,39 +17,40 @@ module OpenAI
   #
   # @example
   # ```ruby
-  #   completions = page.to_enum.take(2)
+  #   completions = page
+  #     .to_enum
+  #     .lazy
+  #     .select { _1.object_id.even? }
+  #     .map(&:itself)
+  #     .take(2)
+  #     .to_a
   #
   #   completions => Array
   # ```
   module BasePage
     # @return [Boolean]
-    #
     def next_page? = (raise NotImplementedError)
 
     # @raise [OpenAI::APIError]
     # @return [OpenAI::BasePage]
-    #
     def next_page = (raise NotImplementedError)
 
     # @param blk [Proc]
     #
     # @return [void]
-    #
     def auto_paging_each(&) = (raise NotImplementedError)
 
     # @return [Enumerable]
-    #
     def to_enum = super(:auto_paging_each)
 
     alias_method :enum_for, :to_enum
 
-    # @private
+    # @api private
     #
     # @param client [OpenAI::BaseClient]
     # @param req [Hash{Symbol=>Object}]
     # @param headers [Hash{String=>String}, Net::HTTPHeader]
     # @param page_data [Object]
-    #
     def initialize(client:, req:, headers:, page_data:)
       @client = client
       @req = req
diff --git a/lib/openai/base_stream.rb b/lib/openai/base_stream.rb
index c2beb4b9..59f8f874 100644
--- a/lib/openai/base_stream.rb
+++ b/lib/openai/base_stream.rb
@@ -1,36 +1,37 @@
 # frozen_string_literal: true
 
 module OpenAI
-  # @private
-  #
   # @example
   # ```ruby
-  #   stream.for_each do |message|
-  #     puts(message)
+  #   stream.for_each do |chunk|
+  #     puts(chunk)
   #   end
   # ```
   #
   # @example
   # ```ruby
-  #   messages = stream.to_enum.take(2)
+  #   chunks = stream
+  #     .to_enum
+  #     .lazy
+  #     .select { _1.object_id.even? }
+  #     .map(&:itself)
+  #     .take(2)
+  #     .to_a
   #
-  #   messages => Array
+  #   chunks => Array
   # ```
   module BaseStream
     # @return [void]
-    #
     def close = OpenAI::Util.close_fused!(@iterator)
 
-    # @private
+    # @api private
     #
     # @return [Enumerable]
-    #
     private def iterator = (raise NotImplementedError)
 
     # @param blk [Proc]
     #
     # @return [void]
-    #
     def for_each(&)
       unless block_given?
         raise ArgumentError.new("A block must be given to ##{__method__}")
@@ -39,19 +40,17 @@ def for_each(&)
     end
 
     # @return [Enumerable]
-    #
     def to_enum = @iterator
 
     alias_method :enum_for, :to_enum
 
-    # @private
+    # @api private
     #
     # @param model [Class, OpenAI::Converter]
     # @param url [URI::Generic]
     # @param status [Integer]
     # @param response [Net::HTTPResponse]
     # @param messages [Enumerable]
-    #
     def initialize(model:, url:, status:, response:, messages:)
       @model = model
       @url = url
diff --git a/lib/openai/client.rb b/lib/openai/client.rb
index 7160cc26..126da608 100644
--- a/lib/openai/client.rb
+++ b/lib/openai/client.rb
@@ -66,10 +66,9 @@ class Client < OpenAI::BaseClient
     # @return [OpenAI::Resources::Responses]
     attr_reader :responses
 
-    # @private
+    # @api private
     #
     # @return [Hash{String=>String}]
-    #
     private def auth_headers
       return {} if @api_key.nil?
 
@@ -93,7 +92,6 @@ class Client < OpenAI::BaseClient
     # @param initial_retry_delay [Float]
     #
     # @param max_retry_delay [Float]
-    #
     def initialize(
       base_url: nil,
       api_key: ENV["OPENAI_API_KEY"],
diff --git a/lib/openai/cursor_page.rb b/lib/openai/cursor_page.rb
index 0e74ea9e..35b585e9 100644
--- a/lib/openai/cursor_page.rb
+++ b/lib/openai/cursor_page.rb
@@ -17,26 +17,31 @@ module OpenAI
   #
   # @example
   # ```ruby
-  #   completions = cursor_page.to_enum.take(2)
+  #   completions = cursor_page
+  #     .to_enum
+  #     .lazy
+  #     .select { _1.object_id.even? }
+  #     .map(&:itself)
+  #     .take(2)
+  #     .to_a
   #
   #   completions => Array
   # ```
   class CursorPage
     include OpenAI::BasePage
 
-    # @return [Array]
+    # @return [Array, nil]
     attr_accessor :data
 
     # @return [Boolean]
     attr_accessor :has_more
 
-    # @private
+    # @api private
     #
     # @param client [OpenAI::BaseClient]
     # @param req [Hash{Symbol=>Object}]
     # @param headers [Hash{String=>String}, Net::HTTPHeader]
     # @param page_data [Hash{Symbol=>Object}]
-    #
     def initialize(client:, req:, headers:, page_data:)
       super
       model = req.fetch(:model)
@@ -61,10 +66,10 @@ def next_page?
 
    # @raise [OpenAI::HTTP::Error]
    # @return [OpenAI::CursorPage]
-    #
    def next_page
      unless next_page?
-        raise RuntimeError.new("No more pages available. Please check #next_page? before calling ##{__method__}")
+        message = "No more pages available. Please check #next_page? before calling ##{__method__}"
+        raise RuntimeError.new(message)
      end
 
      req = OpenAI::Util.deep_merge(@req, {query: {after: data&.last&.id}})
@@ -72,7 +77,6 @@ def next_page
    end
 
    # @param blk [Proc]
-    #
    def auto_paging_each(&blk)
      unless block_given?
        raise ArgumentError.new("A block must be given to ##{__method__}")
@@ -86,7 +90,6 @@ def auto_paging_each(&blk)
    end
 
    # @return [String]
-    #
    def inspect
      "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} has_more=#{has_more.inspect}>"
    end
diff --git a/lib/openai/errors.rb b/lib/openai/errors.rb
index 40faaaae..7a4228e8 100644
--- a/lib/openai/errors.rb
+++ b/lib/openai/errors.rb
@@ -4,7 +4,7 @@ module OpenAI
   class Error < StandardError
     # @!parse
     #   # @return [StandardError, nil]
-    #   attr_reader :cause
+    #   attr_accessor :cause
   end
 
   class ConversionError < OpenAI::Error
@@ -12,24 +12,24 @@ class ConversionError < OpenAI::Error
 
   class APIError < OpenAI::Error
     # @return [URI::Generic]
-    attr_reader :url
+    attr_accessor :url
 
     # @return [Integer, nil]
-    attr_reader :status
+    attr_accessor :status
 
     # @return [Object, nil]
-    attr_reader :body
+    attr_accessor :body
 
     # @return [String, nil]
-    attr_reader :code
+    attr_accessor :code
 
     # @return [String, nil]
-    attr_reader :param
+    attr_accessor :param
 
     # @return [String, nil]
-    attr_reader :type
+    attr_accessor :type
 
-    # @private
+    # @api private
     #
     # @param url [URI::Generic]
     # @param status [Integer, nil]
@@ -37,7 +37,6 @@ class APIError < OpenAI::Error
     # @param request [nil]
     # @param response [nil]
     # @param message [String, nil]
-    #
     def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil)
       @url = url
       @status = status
@@ -51,25 +50,25 @@ def initialize(url:, status: nil, body: nil, request: nil, response: nil, messag
 
   class APIConnectionError < OpenAI::APIError
     # @!parse
     #   # @return [nil]
-    #   attr_reader :status
+    #   attr_accessor :status
 
     # @!parse
     #   # @return [nil]
-    #   attr_reader :body
+    #   attr_accessor :body
 
     # @!parse
     #   # @return [nil]
-    #   attr_reader :code
+    #   attr_accessor :code
 
     # @!parse
     #   # @return [nil]
-    #   attr_reader :param
+    #   attr_accessor :param
 
     # @!parse
     #   # @return [nil]
-    #   attr_reader :type
+    #   attr_accessor :type
 
-    # @private
+    # @api private
     #
     # @param url [URI::Generic]
     # @param status [nil]
@@ -77,7 +76,6 @@ class APIConnectionError < OpenAI::APIError
     # @param request [nil]
     # @param response [nil]
     # @param message [String, nil]
-    #
     def initialize(
       url:,
       status: nil,
@@ -91,7 +89,7 @@ def initialize(
   end
 
   class APITimeoutError < OpenAI::APIConnectionError
-    # @private
+    # @api private
     #
     # @param url [URI::Generic]
     # @param status [nil]
@@ -99,7 +97,6 @@ class APITimeoutError < OpenAI::APIConnectionError
     # @param request [nil]
     # @param response [nil]
     # @param message [String, nil]
-    #
     def initialize(
       url:,
       status: nil,
@@ -113,7 +110,7 @@ def initialize(
   end
 
   class APIStatusError < OpenAI::APIError
-    # @private
+    # @api private
     #
     # @param url [URI::Generic]
     # @param status [Integer]
@@ -123,7 +120,6 @@ class APIStatusError < OpenAI::APIError
     # @param message [String, nil]
     #
     # @return [OpenAI::APIStatusError]
-    #
     def self.for(url:, status:, body:, request:, response:, message: nil)
       kwargs = {url: url, status: status, body: body, request: request, response: response, message: message}
 
@@ -151,21 +147,21 @@ def self.for(url:, status:, body:, request:, response:, message: nil)
 
     # @!parse
     #   # @return [Integer]
-    #   attr_reader :status
+    #   attr_accessor :status
 
     # @!parse
     #   # @return [String, nil]
-    #   attr_reader :code
+    #   attr_accessor :code
 
     # @!parse
     #   # @return [String, nil]
-    #   attr_reader :param
+    #   attr_accessor :param
 
     # @!parse
     #   # @return [String, nil]
-    #   attr_reader :type
+    #   attr_accessor :type
 
-    # @private
+    # @api private
     #
     # @param url [URI::Generic]
     # @param status [Integer]
@@ -173,7 +169,6 @@ def self.for(url:, status:, body:, request:, response:, message: nil)
     # @param request [nil]
     # @param response [nil]
     # @param message [String, nil]
-    #
     def initialize(url:, status:, body:, request:, response:, message: nil)
       message ||= OpenAI::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} }
       @code = OpenAI::Converter.coerce(String, OpenAI::Util.dig(body, :code))
diff --git a/lib/openai/extern.rb b/lib/openai/extern.rb
index 3faad4c1..1ab41492 100644
--- a/lib/openai/extern.rb
+++ b/lib/openai/extern.rb
@@ -1,10 +1,7 @@
 # frozen_string_literal: true
 
 module OpenAI
-  # @private
-  #
-  # @abstract
-  #
+  # @api private
  module Extern
  end
 end
diff --git a/lib/openai/models/audio/speech_create_params.rb b/lib/openai/models/audio/speech_create_params.rb
index fa2f4155..b7e77b57 100644
--- a/lib/openai/models/audio/speech_create_params.rb
+++ b/lib/openai/models/audio/speech_create_params.rb
@@ -73,6 +73,12 @@ class Model < OpenAI::Union
        # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd`
        variant enum: -> { OpenAI::Models::Audio::SpeechModel }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Symbol, OpenAI::Models::Audio::SpeechModel)]
+        #     def variants; end
+        #   end
      end
 
      # @abstract
diff --git a/lib/openai/models/audio/speech_model.rb b/lib/openai/models/audio/speech_model.rb
index 84765c9f..96744e0c 100644
--- a/lib/openai/models/audio/speech_model.rb
+++ b/lib/openai/models/audio/speech_model.rb
@@ -4,7 +4,6 @@ module OpenAI
  module Models
    module Audio
      # @abstract
-      #
      class SpeechModel < OpenAI::Enum
        TTS_1 = :"tts-1"
        TTS_1_HD = :"tts-1-hd"
diff --git a/lib/openai/models/audio/transcription_create_params.rb b/lib/openai/models/audio/transcription_create_params.rb
index d0c79556..d6d9f071 100644
--- a/lib/openai/models/audio/transcription_create_params.rb
+++ b/lib/openai/models/audio/transcription_create_params.rb
@@ -122,10 +122,15 @@ class Model < OpenAI::Union
        # ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
        variant enum: -> { OpenAI::Models::AudioModel }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
+        #     def variants; end
+        #   end
      end
 
      # @abstract
-      #
      class TimestampGranularity < OpenAI::Enum
        WORD = :word
        SEGMENT = :segment
diff --git a/lib/openai/models/audio/transcription_create_response.rb b/lib/openai/models/audio/transcription_create_response.rb
index 1ff3100f..2ad0d933 100644
--- a/lib/openai/models/audio/transcription_create_response.rb
+++ b/lib/openai/models/audio/transcription_create_response.rb
@@ -13,6 +13,12 @@ class TranscriptionCreateResponse < OpenAI::Union
        # Represents a verbose json transcription response returned by model, based on the provided input.
        variant -> { OpenAI::Models::Audio::TranscriptionVerbose }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/audio/translation_create_params.rb b/lib/openai/models/audio/translation_create_params.rb
index 556b8419..4fd4a4dc 100644
--- a/lib/openai/models/audio/translation_create_params.rb
+++ b/lib/openai/models/audio/translation_create_params.rb
@@ -81,6 +81,12 @@ class Model < OpenAI::Union
        # ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
        variant enum: -> { OpenAI::Models::AudioModel }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/audio/translation_create_response.rb b/lib/openai/models/audio/translation_create_response.rb
index 278a37e5..94020236 100644
--- a/lib/openai/models/audio/translation_create_response.rb
+++ b/lib/openai/models/audio/translation_create_response.rb
@@ -4,11 +4,16 @@ module OpenAI
  module Models
    module Audio
      # @abstract
-      #
      class TranslationCreateResponse < OpenAI::Union
        variant -> { OpenAI::Models::Audio::Translation }
 
        variant -> { OpenAI::Models::Audio::TranslationVerbose }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/audio_model.rb b/lib/openai/models/audio_model.rb
index 1043030f..81db712e 100644
--- a/lib/openai/models/audio_model.rb
+++ b/lib/openai/models/audio_model.rb
@@ -3,7 +3,6 @@ module OpenAI
  module Models
    # @abstract
-    #
    class AudioModel < OpenAI::Enum
      WHISPER_1 = :"whisper-1"
 
diff --git a/lib/openai/models/beta/assistant_create_params.rb b/lib/openai/models/beta/assistant_create_params.rb
index de543e84..f66edb15 100644
--- a/lib/openai/models/beta/assistant_create_params.rb
+++ b/lib/openai/models/beta/assistant_create_params.rb
@@ -169,6 +169,12 @@ class Model < OpenAI::Union
        # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
        variant enum: -> { OpenAI::Models::ChatModel }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+        #     def variants; end
+        #   end
      end
 
      class ToolResources < OpenAI::BaseModel
@@ -384,6 +390,12 @@ class Static < OpenAI::BaseModel
            #   def initialize: (Hash | OpenAI::BaseModel) -> void
          end
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/beta/assistant_response_format_option.rb b/lib/openai/models/beta/assistant_response_format_option.rb
index 18671049..1e0036f2 100644
--- a/lib/openai/models/beta/assistant_response_format_option.rb
+++ b/lib/openai/models/beta/assistant_response_format_option.rb
@@ -41,6 +41,12 @@ class AssistantResponseFormatOption < OpenAI::Union
        # JSON Schema response format. Used to generate structured JSON responses.
        # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
        variant -> { OpenAI::Models::ResponseFormatJSONSchema }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/beta/assistant_stream_event.rb b/lib/openai/models/beta/assistant_stream_event.rb
index 80632016..fdc598d7 100644
--- a/lib/openai/models/beta/assistant_stream_event.rb
+++ b/lib/openai/models/beta/assistant_stream_event.rb
@@ -725,6 +725,12 @@ class ErrorEvent < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/beta/assistant_tool.rb b/lib/openai/models/beta/assistant_tool.rb
index 7cd4e9b8..0ea9bc07 100644
--- a/lib/openai/models/beta/assistant_tool.rb
+++ b/lib/openai/models/beta/assistant_tool.rb
@@ -4,7 +4,6 @@ module OpenAI
  module Models
    module Beta
      # @abstract
-      #
      class AssistantTool < OpenAI::Union
        discriminator :type
 
@@ -13,6 +12,12 @@ class AssistantTool < OpenAI::Union
        variant :file_search, -> { OpenAI::Models::Beta::FileSearchTool }
 
        variant :function, -> { OpenAI::Models::Beta::FunctionTool }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool)]
+        #     def variants; end
+        #   end
      end
    end
  end
diff --git a/lib/openai/models/beta/assistant_tool_choice_option.rb b/lib/openai/models/beta/assistant_tool_choice_option.rb
index 84736979..62a2c795 100644
--- a/lib/openai/models/beta/assistant_tool_choice_option.rb
+++ b/lib/openai/models/beta/assistant_tool_choice_option.rb
@@ -32,6 +32,12 @@ class Auto < OpenAI::Enum
 
          finalize!
end + + # @!parse + # class << self + # # @return [Array(Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice)] + # def variants; end + # end end end end diff --git a/lib/openai/models/beta/assistant_update_params.rb b/lib/openai/models/beta/assistant_update_params.rb index 5cedcb79..494df652 100644 --- a/lib/openai/models/beta/assistant_update_params.rb +++ b/lib/openai/models/beta/assistant_update_params.rb @@ -39,11 +39,11 @@ class AssistantUpdateParams < OpenAI::BaseModel # [Model overview](https://platform.openai.com/docs/models) for descriptions of # them. # - # @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels, nil] + # @return [String, Symbol, nil] optional :model, union: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model } # @!parse - # # @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels] + # # @return [String, Symbol] # attr_writer :model # @!attribute name @@ -131,7 +131,7 @@ class AssistantUpdateParams < OpenAI::BaseModel # # @param description [String, nil] # # @param instructions [String, nil] # # @param metadata [Hash{Symbol=>String}, nil] - # # @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels] + # # @param model [String, Symbol] # # @param name [String, nil] # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] @@ -169,52 +169,108 @@ class AssistantUpdateParams < OpenAI::BaseModel # [Model overview](https://platform.openai.com/docs/models) for descriptions of # them. class Model < OpenAI::Union + # @!group + + O3_MINI = :"o3-mini" + O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" + O1 = :o1 + O1_2024_12_17 = :"o1-2024-12-17" + GPT_4O = :"gpt-4o" + GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20" + GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06" + GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13" + GPT_4O_MINI = :"gpt-4o-mini" + GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18" + GPT_4_5_PREVIEW = :"gpt-4.5-preview" + GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27" + GPT_4_TURBO = :"gpt-4-turbo" + GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09" + GPT_4_0125_PREVIEW = :"gpt-4-0125-preview" + GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview" + GPT_4_1106_PREVIEW = :"gpt-4-1106-preview" + GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview" + GPT_4 = :"gpt-4" + GPT_4_0314 = :"gpt-4-0314" + GPT_4_0613 = :"gpt-4-0613" + GPT_4_32K = :"gpt-4-32k" + GPT_4_32K_0314 = :"gpt-4-32k-0314" + GPT_4_32K_0613 = :"gpt-4-32k-0613" + GPT_3_5_TURBO = :"gpt-3.5-turbo" + GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k" + GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613" + GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106" + GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" + GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" + + # @!endgroup + variant String - # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them. - variant enum: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels } + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI - # @abstract - # - # ID of the model to use. 
You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - class AssistantSupportedModels < OpenAI::Enum - O3_MINI = :"o3-mini" - O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" - O1 = :o1 - O1_2024_12_17 = :"o1-2024-12-17" - GPT_4O = :"gpt-4o" - GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20" - GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06" - GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13" - GPT_4O_MINI = :"gpt-4o-mini" - GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18" - GPT_4_5_PREVIEW = :"gpt-4.5-preview" - GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27" - GPT_4_TURBO = :"gpt-4-turbo" - GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09" - GPT_4_0125_PREVIEW = :"gpt-4-0125-preview" - GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview" - GPT_4_1106_PREVIEW = :"gpt-4-1106-preview" - GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview" - GPT_4 = :"gpt-4" - GPT_4_0314 = :"gpt-4-0314" - GPT_4_0613 = :"gpt-4-0613" - GPT_4_32K = :"gpt-4-32k" - GPT_4_32K_0314 = :"gpt-4-32k-0314" - GPT_4_32K_0613 = :"gpt-4-32k-0613" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k" - GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613" - GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106" - GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" - GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" - - finalize! - end + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI_2025_01_31 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::O1 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::O1_2024_12_17 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_11_20 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_08_06 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_05_13 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI_2024_07_18 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW_2025_02_27 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_2024_04_09 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0125_PREVIEW + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_PREVIEW + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1106_PREVIEW + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_VISION_PREVIEW + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0314 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0613 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0314 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0613 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO + + 
variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0613 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_1106 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0125 + + variant const: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K_0613 + + # @!parse + # class << self + # # @return [Array(String, Symbol)] + # def variants; end + # end end class ToolResources < OpenAI::BaseModel diff --git a/lib/openai/models/beta/message_stream_event.rb b/lib/openai/models/beta/message_stream_event.rb index 32e6ee21..b110bfed 100644 --- a/lib/openai/models/beta/message_stream_event.rb +++ b/lib/openai/models/beta/message_stream_event.rb @@ -158,6 +158,12 @@ class ThreadMessageIncomplete < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete)] + # def variants; end + # end end end end diff --git a/lib/openai/models/beta/run_step_stream_event.rb b/lib/openai/models/beta/run_step_stream_event.rb index 645de8bd..82c7266e 100644 --- a/lib/openai/models/beta/run_step_stream_event.rb +++ b/lib/openai/models/beta/run_step_stream_event.rb @@ -210,6 +210,12 @@ class ThreadRunStepExpired < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired)] + # def variants; end + # end end end end diff --git a/lib/openai/models/beta/run_stream_event.rb b/lib/openai/models/beta/run_stream_event.rb index 1e792b0e..f8548343 100644 --- a/lib/openai/models/beta/run_stream_event.rb +++ b/lib/openai/models/beta/run_stream_event.rb @@ -290,6 +290,12 @@ class ThreadRunExpired < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired)] + # def variants; end + # end end end end diff --git a/lib/openai/models/beta/thread_create_and_run_params.rb b/lib/openai/models/beta/thread_create_and_run_params.rb index 3cf817fa..72d1ae24 100644 --- 
diff --git a/lib/openai/models/beta/message_stream_event.rb b/lib/openai/models/beta/message_stream_event.rb
index 32e6ee21..b110bfed 100644
--- a/lib/openai/models/beta/message_stream_event.rb
+++ b/lib/openai/models/beta/message_stream_event.rb
@@ -158,6 +158,12 @@ class ThreadMessageIncomplete < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/run_step_stream_event.rb b/lib/openai/models/beta/run_step_stream_event.rb
index 645de8bd..82c7266e 100644
--- a/lib/openai/models/beta/run_step_stream_event.rb
+++ b/lib/openai/models/beta/run_step_stream_event.rb
@@ -210,6 +210,12 @@ class ThreadRunStepExpired < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/run_stream_event.rb b/lib/openai/models/beta/run_stream_event.rb
index 1e792b0e..f8548343 100644
--- a/lib/openai/models/beta/run_stream_event.rb
+++ b/lib/openai/models/beta/run_stream_event.rb
@@ -290,6 +290,12 @@ class ThreadRunExpired < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/thread_create_and_run_params.rb b/lib/openai/models/beta/thread_create_and_run_params.rb
index 3cf817fa..72d1ae24 100644
--- a/lib/openai/models/beta/thread_create_and_run_params.rb
+++ b/lib/openai/models/beta/thread_create_and_run_params.rb
@@ -221,6 +221,12 @@ class Model < OpenAI::Union
        #   The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
        variant enum: -> { OpenAI::Models::ChatModel }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+        #     def variants; end
+        #   end
      end

      class Thread < OpenAI::BaseModel
@@ -328,6 +334,12 @@ class Content < OpenAI::Union
          # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
          variant OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Content::MessageContentPartParamArray
+
+          # @!parse
+          #   class << self
+          #     # @return [Array(String, Array)]
+          #     def variants; end
+          #   end
        end

        # @abstract
@@ -376,7 +388,6 @@ class Attachment < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void

          # @abstract
-         #
          class Tool < OpenAI::Union
            discriminator :type
@@ -399,6 +410,12 @@ class FileSearch < OpenAI::BaseModel
            #   def initialize: (Hash | OpenAI::BaseModel) -> void
          end
+
+          # @!parse
+          #   class << self
+          #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch)]
+          #     def variants; end
+          #   end
        end
      end
    end
@@ -617,6 +634,12 @@ class Static < OpenAI::BaseModel
            #   def initialize: (Hash | OpenAI::BaseModel) -> void
          end
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
+        #     def variants; end
+        #   end
      end
    end
  end
@@ -701,13 +724,18 @@ class FileSearch < OpenAI::BaseModel
      end

      # @abstract
-     #
      class Tool < OpenAI::Union
        variant -> { OpenAI::Models::Beta::CodeInterpreterTool }

        variant -> { OpenAI::Models::Beta::FileSearchTool }

        variant -> { OpenAI::Models::Beta::FunctionTool }
+
+       # @!parse
+       #   class << self
+       #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool)]
+       #     def variants; end
+       #   end
      end

      class TruncationStrategy < OpenAI::BaseModel
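`ThreadCreateAndRunParams::Model` is a String-or-`ChatModel` union, so a run can be started with either spelling. A sketch, assuming the `create_and_run` call shape on the beta threads resource:

  client.beta.threads.create_and_run(
    assistant_id: "asst_abc123",
    model: "gpt-4o",  # or a ChatModel constant, if defined, e.g. OpenAI::Models::ChatModel::GPT_4O
    thread: {messages: [{role: :user, content: "Hello!"}]}
  )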
diff --git a/lib/openai/models/beta/thread_create_params.rb b/lib/openai/models/beta/thread_create_params.rb
index 46dfaee7..6d67b56a 100644
--- a/lib/openai/models/beta/thread_create_params.rb
+++ b/lib/openai/models/beta/thread_create_params.rb
@@ -107,6 +107,12 @@ class Content < OpenAI::Union
        # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
        variant OpenAI::Models::Beta::ThreadCreateParams::Message::Content::MessageContentPartParamArray
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Array)]
+        #     def variants; end
+        #   end
      end

      # @abstract
@@ -155,7 +161,6 @@ class Attachment < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void

        # @abstract
-       #
        class Tool < OpenAI::Union
          discriminator :type
@@ -178,6 +183,12 @@ class FileSearch < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch)]
+        #     def variants; end
+        #   end
      end
    end
  end
@@ -395,6 +406,12 @@ class Static < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/annotation.rb b/lib/openai/models/beta/threads/annotation.rb
index 3e21c302..3f0a547d 100644
--- a/lib/openai/models/beta/threads/annotation.rb
+++ b/lib/openai/models/beta/threads/annotation.rb
@@ -17,6 +17,12 @@ class Annotation < OpenAI::Union
        # A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.
        variant :file_path, -> { OpenAI::Models::Beta::Threads::FilePathAnnotation }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation)]
+        #     def variants; end
+        #   end
      end
    end
  end
end
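The Annotation union resolves to one of the two variants listed above, so callers can dispatch with ordinary case/when. A sketch; the attribute names follow the documented API shape and are assumptions here:

  def describe(annotation)
    case annotation
    when OpenAI::Models::Beta::Threads::FileCitationAnnotation
      "cites file #{annotation.file_citation.file_id}"
    when OpenAI::Models::Beta::Threads::FilePathAnnotation
      "points at generated file #{annotation.file_path.file_id}"
    end
  end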
diff --git a/lib/openai/models/beta/threads/annotation_delta.rb b/lib/openai/models/beta/threads/annotation_delta.rb
index dfaf1d9e..6b22d5be 100644
--- a/lib/openai/models/beta/threads/annotation_delta.rb
+++ b/lib/openai/models/beta/threads/annotation_delta.rb
@@ -17,6 +17,12 @@ class AnnotationDelta < OpenAI::Union
        # A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.
        variant :file_path, -> { OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation)]
+        #     def variants; end
+        #   end
      end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/message.rb b/lib/openai/models/beta/threads/message.rb
index b434fe46..0a6b9d1b 100644
--- a/lib/openai/models/beta/threads/message.rb
+++ b/lib/openai/models/beta/threads/message.rb
@@ -173,7 +173,6 @@ class Attachment < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void

        # @abstract
-       #
        class Tool < OpenAI::Union
          variant -> { OpenAI::Models::Beta::CodeInterpreterTool }
@@ -193,6 +192,12 @@ class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly)]
+        #     def variants; end
+        #   end
      end
    end
diff --git a/lib/openai/models/beta/threads/message_content.rb b/lib/openai/models/beta/threads/message_content.rb
index df27bb65..7e6e4698 100644
--- a/lib/openai/models/beta/threads/message_content.rb
+++ b/lib/openai/models/beta/threads/message_content.rb
@@ -22,6 +22,12 @@ class MessageContent < OpenAI::Union
        # The refusal content generated by the assistant.
        variant :refusal, -> { OpenAI::Models::Beta::Threads::RefusalContentBlock }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock)]
+        #     def variants; end
+        #   end
      end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/message_content_delta.rb b/lib/openai/models/beta/threads/message_content_delta.rb
index f2e05dfb..bcf970c1 100644
--- a/lib/openai/models/beta/threads/message_content_delta.rb
+++ b/lib/openai/models/beta/threads/message_content_delta.rb
@@ -22,6 +22,12 @@ class MessageContentDelta < OpenAI::Union
        # References an image URL in the content of a message.
        variant :image_url, -> { OpenAI::Models::Beta::Threads::ImageURLDeltaBlock }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock)]
+        #     def variants; end
+        #   end
      end
    end
  end
end
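A thread message's content is an array whose elements resolve to MessageContent variants. A dispatch sketch; the nested attribute names follow the documented API shape and are assumptions here:

  message.content.each do |block|
    case block
    when OpenAI::Models::Beta::Threads::TextContentBlock
      puts block.text.value
    when OpenAI::Models::Beta::Threads::ImageFileContentBlock
      puts "image file: #{block.image_file.file_id}"
    end
  end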
diff --git a/lib/openai/models/beta/threads/message_content_part_param.rb b/lib/openai/models/beta/threads/message_content_part_param.rb
index 86766a56..74766529 100644
--- a/lib/openai/models/beta/threads/message_content_part_param.rb
+++ b/lib/openai/models/beta/threads/message_content_part_param.rb
@@ -19,6 +19,12 @@ class MessageContentPartParam < OpenAI::Union
        # The text content that is part of a message.
        variant :text, -> { OpenAI::Models::Beta::Threads::TextContentBlockParam }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam)]
+        #     def variants; end
+        #   end
      end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/message_create_params.rb b/lib/openai/models/beta/threads/message_create_params.rb
index 544cf794..b0a54ec9 100644
--- a/lib/openai/models/beta/threads/message_create_params.rb
+++ b/lib/openai/models/beta/threads/message_create_params.rb
@@ -67,6 +67,12 @@ class Content < OpenAI::Union
        # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
        variant OpenAI::Models::Beta::Threads::MessageCreateParams::Content::MessageContentPartParamArray
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Array)]
+        #     def variants; end
+        #   end
      end

      # @abstract
@@ -115,7 +121,6 @@ class Attachment < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void

        # @abstract
-       #
        class Tool < OpenAI::Union
          discriminator :type
@@ -138,6 +143,12 @@ class FileSearch < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch)]
+        #     def variants; end
+        #   end
      end
    end
  end
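`MessageCreateParams::Content` accepts a plain string or an array of content-part hashes. A sketch; the `messages.create` call shape is an assumption for illustration:

  client.beta.threads.messages.create("thread_abc123", role: :user, content: "Hello!")

  # Equivalent in spirit, using the content-part array variant:
  client.beta.threads.messages.create(
    "thread_abc123",
    role: :user,
    content: [{type: :text, text: "Hello!"}]
  )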
diff --git a/lib/openai/models/beta/threads/run_create_params.rb b/lib/openai/models/beta/threads/run_create_params.rb
index c655fda0..c5c473e5 100644
--- a/lib/openai/models/beta/threads/run_create_params.rb
+++ b/lib/openai/models/beta/threads/run_create_params.rb
@@ -296,6 +296,12 @@ class Content < OpenAI::Union
        # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
        variant OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Content::MessageContentPartParamArray
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Array)]
+        #     def variants; end
+        #   end
      end

      # @abstract
@@ -344,7 +350,6 @@ class Attachment < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void

        # @abstract
-       #
        class Tool < OpenAI::Union
          discriminator :type
@@ -367,6 +372,12 @@ class FileSearch < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch)]
+        #     def variants; end
+        #   end
      end
    end
  end
@@ -382,6 +393,12 @@ class Model < OpenAI::Union
      #   The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
      variant enum: -> { OpenAI::Models::ChatModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+      #     def variants; end
+      #   end
    end

    class TruncationStrategy < OpenAI::BaseModel
diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
index fcde0967..0313a163 100644
--- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
+++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb
@@ -137,6 +137,12 @@ class Image < OpenAI::BaseModel
          #   def initialize: (Hash | OpenAI::BaseModel) -> void
        end
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
index ce8315ac..9e9097b3 100644
--- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
+++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb
@@ -96,6 +96,12 @@ class Output < OpenAI::Union
      variant :logs, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs }

      variant :image, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/runs/run_step.rb b/lib/openai/models/beta/threads/runs/run_step.rb
index ae1e41c3..4a7aa3e2 100644
--- a/lib/openai/models/beta/threads/runs/run_step.rb
+++ b/lib/openai/models/beta/threads/runs/run_step.rb
@@ -219,6 +219,12 @@ class StepDetails < OpenAI::Union
      # Details of the tool call.
      variant :tool_calls, -> { OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/beta/threads/runs/run_step_delta.rb b/lib/openai/models/beta/threads/runs/run_step_delta.rb
index 3f19839e..c8ba4aa1 100644
--- a/lib/openai/models/beta/threads/runs/run_step_delta.rb
+++ b/lib/openai/models/beta/threads/runs/run_step_delta.rb
@@ -36,6 +36,12 @@ class StepDetails < OpenAI::Union
      # Details of the tool call.
      variant :tool_calls, -> { OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/runs/run_step_include.rb b/lib/openai/models/beta/threads/runs/run_step_include.rb
index 1d4c531e..ae9413a8 100644
--- a/lib/openai/models/beta/threads/runs/run_step_include.rb
+++ b/lib/openai/models/beta/threads/runs/run_step_include.rb
@@ -6,7 +6,6 @@ module Beta
    module Threads
      module Runs
        # @abstract
-       #
        class RunStepInclude < OpenAI::Enum
          STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content"
diff --git a/lib/openai/models/beta/threads/runs/tool_call.rb b/lib/openai/models/beta/threads/runs/tool_call.rb
index d5d780ad..20cc29a6 100644
--- a/lib/openai/models/beta/threads/runs/tool_call.rb
+++ b/lib/openai/models/beta/threads/runs/tool_call.rb
@@ -17,6 +17,12 @@ class ToolCall < OpenAI::Union
      variant :file_search, -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall }

      variant :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCall }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/beta/threads/runs/tool_call_delta.rb b/lib/openai/models/beta/threads/runs/tool_call_delta.rb
index a1b92351..3dae6b9b 100644
--- a/lib/openai/models/beta/threads/runs/tool_call_delta.rb
+++ b/lib/openai/models/beta/threads/runs/tool_call_delta.rb
@@ -17,6 +17,12 @@ class ToolCallDelta < OpenAI::Union
      variant :file_search, -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta }

      variant :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/chat/chat_completion_assistant_message_param.rb b/lib/openai/models/chat/chat_completion_assistant_message_param.rb
index 7dcbccab..0ab1ed53 100644
--- a/lib/openai/models/chat/chat_completion_assistant_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_assistant_message_param.rb
@@ -130,11 +130,22 @@ class ArrayOfContentPart < OpenAI::Union
        variant :text, -> { OpenAI::Models::Chat::ChatCompletionContentPartText }

        variant :refusal, -> { OpenAI::Models::Chat::ChatCompletionContentPartRefusal }
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal)]
+        #     def variants; end
+        #   end
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end

    # @deprecated
-   #
    class FunctionCall < OpenAI::BaseModel
      # @!attribute arguments
      #   The arguments to call the function with, as generated by the model in JSON
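The assistant-message Content union above accepts a plain string or an array of text/refusal parts. A data-only sketch of the array form, with shapes taken from the variants listed in the diff:

  assistant_message = {
    role: :assistant,
    content: [
      {type: :text, text: "Here is the answer."}
      # a refusal part would instead carry {type: :refusal, refusal: "..."}
    ]
  }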
diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb
index 8d0ec4c1..0c8a0cea 100644
--- a/lib/openai/models/chat/chat_completion_chunk.rb
+++ b/lib/openai/models/chat/chat_completion_chunk.rb
@@ -194,7 +194,6 @@ class Delta < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void

        # @deprecated
-       #
        class FunctionCall < OpenAI::BaseModel
          # @!attribute [r] arguments
          #   The arguments to call the function with, as generated by the model in JSON
diff --git a/lib/openai/models/chat/chat_completion_content_part.rb b/lib/openai/models/chat/chat_completion_content_part.rb
index 836e1222..f9fede31 100644
--- a/lib/openai/models/chat/chat_completion_content_part.rb
+++ b/lib/openai/models/chat/chat_completion_content_part.rb
@@ -87,6 +87,12 @@ class File < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void
      end
    end
+
+    # @!parse
+    #   class << self
+    #     # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File)]
+    #     def variants; end
+    #   end
  end
end
diff --git a/lib/openai/models/chat/chat_completion_developer_message_param.rb b/lib/openai/models/chat/chat_completion_developer_message_param.rb
index 7735915f..d7d8b7c4 100644
--- a/lib/openai/models/chat/chat_completion_developer_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_developer_message_param.rb
@@ -51,6 +51,12 @@ class Content < OpenAI::Union
      # An array of content parts with a defined type. For developer messages, only type `text` is supported.
      variant OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::Content::ChatCompletionContentPartTextArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/chat/chat_completion_function_message_param.rb b/lib/openai/models/chat/chat_completion_function_message_param.rb
index 4ce20d75..1da70875 100644
--- a/lib/openai/models/chat/chat_completion_function_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_function_message_param.rb
@@ -4,7 +4,6 @@ module OpenAI
  module Models
    module Chat
      # @deprecated
-     #
      class ChatCompletionFunctionMessageParam < OpenAI::BaseModel
        # @!attribute content
        #   The contents of the function message.
diff --git a/lib/openai/models/chat/chat_completion_message.rb b/lib/openai/models/chat/chat_completion_message.rb
index 96223d57..228616ad 100644
--- a/lib/openai/models/chat/chat_completion_message.rb
+++ b/lib/openai/models/chat/chat_completion_message.rb
@@ -151,7 +151,6 @@ class URLCitation < OpenAI::BaseModel
      end

      # @deprecated
-     #
      class FunctionCall < OpenAI::BaseModel
        # @!attribute arguments
        #   The arguments to call the function with, as generated by the model in JSON
diff --git a/lib/openai/models/chat/chat_completion_message_param.rb b/lib/openai/models/chat/chat_completion_message_param.rb
index dac398d3..811b7f8c 100644
--- a/lib/openai/models/chat/chat_completion_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_message_param.rb
@@ -31,6 +31,12 @@ class ChatCompletionMessageParam < OpenAI::Union
      variant :tool, -> { OpenAI::Models::Chat::ChatCompletionToolMessageParam }

      variant :function, -> { OpenAI::Models::Chat::ChatCompletionFunctionMessageParam }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam)]
+      #     def variants; end
+      #   end
    end
  end
diff --git a/lib/openai/models/chat/chat_completion_modality.rb b/lib/openai/models/chat/chat_completion_modality.rb
index e7558545..725b907d 100644
--- a/lib/openai/models/chat/chat_completion_modality.rb
+++ b/lib/openai/models/chat/chat_completion_modality.rb
@@ -4,7 +4,6 @@ module OpenAI
  module Models
    module Chat
      # @abstract
-     #
      class ChatCompletionModality < OpenAI::Enum
        TEXT = :text
        AUDIO = :audio
diff --git a/lib/openai/models/chat/chat_completion_prediction_content.rb b/lib/openai/models/chat/chat_completion_prediction_content.rb
index b7cb311e..d5aca19d 100644
--- a/lib/openai/models/chat/chat_completion_prediction_content.rb
+++ b/lib/openai/models/chat/chat_completion_prediction_content.rb
@@ -44,6 +44,12 @@ class Content < OpenAI::Union
      # An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text inputs.
      variant OpenAI::Models::Chat::ChatCompletionPredictionContent::Content::ChatCompletionContentPartTextArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
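ChatCompletionMessageParam is resolved per element of `messages` by its `role`, so plain hashes work end to end. A short sketch using the standard chat call:

  client.chat.completions.create(
    model: "gpt-4o",
    messages: [
      {role: :system, content: "You are terse."},
      {role: :user, content: "Summarize unions in one line."}
    ]
  )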
diff --git a/lib/openai/models/chat/chat_completion_system_message_param.rb b/lib/openai/models/chat/chat_completion_system_message_param.rb
index 188aa0be..658bb497 100644
--- a/lib/openai/models/chat/chat_completion_system_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_system_message_param.rb
@@ -51,6 +51,12 @@ class Content < OpenAI::Union
      # An array of content parts with a defined type. For system messages, only type `text` is supported.
      variant OpenAI::Models::Chat::ChatCompletionSystemMessageParam::Content::ChatCompletionContentPartTextArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/chat/chat_completion_tool_choice_option.rb b/lib/openai/models/chat/chat_completion_tool_choice_option.rb
index c704bffb..28f7750e 100644
--- a/lib/openai/models/chat/chat_completion_tool_choice_option.rb
+++ b/lib/openai/models/chat/chat_completion_tool_choice_option.rb
@@ -33,6 +33,12 @@ class Auto < OpenAI::Enum
        finalize!
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)]
+      #     def variants; end
+      #   end
    end
  end
diff --git a/lib/openai/models/chat/chat_completion_tool_message_param.rb b/lib/openai/models/chat/chat_completion_tool_message_param.rb
index 9ec24f99..6ba3a959 100644
--- a/lib/openai/models/chat/chat_completion_tool_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_tool_message_param.rb
@@ -42,6 +42,12 @@ class Content < OpenAI::Union
      # An array of content parts with a defined type. For tool messages, only type `text` is supported.
      variant OpenAI::Models::Chat::ChatCompletionToolMessageParam::Content::ChatCompletionContentPartTextArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/chat/chat_completion_user_message_param.rb b/lib/openai/models/chat/chat_completion_user_message_param.rb
index 9f51546a..18451c7b 100644
--- a/lib/openai/models/chat/chat_completion_user_message_param.rb
+++ b/lib/openai/models/chat/chat_completion_user_message_param.rb
@@ -50,6 +50,12 @@ class Content < OpenAI::Union
      # An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text, image, or audio inputs.
      variant OpenAI::Models::Chat::ChatCompletionUserMessageParam::Content::ChatCompletionContentPartArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
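ChatCompletionToolChoiceOption is a union of the Auto enum symbols and a named-tool hash. A sketch of both forms; the exact accepted symbols beyond `:auto` follow the API docs and are assumptions here:

  client.chat.completions.create(
    model: "gpt-4o",
    messages: messages,
    tool_choice: :auto
  )

  # Forcing one specific function instead:
  #   tool_choice: {type: :function, function: {name: "get_weather"}}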
diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb
index b77143a8..49e175c1 100644
--- a/lib/openai/models/chat/completion_create_params.rb
+++ b/lib/openai/models/chat/completion_create_params.rb
@@ -432,6 +432,12 @@ class Model < OpenAI::Union
      #   characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
      #   to browse and compare available models.
      variant enum: -> { OpenAI::Models::ChatModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -470,10 +476,15 @@ class FunctionCallMode < OpenAI::Enum
        finalize!
      end
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)]
+      #     def variants; end
+      #   end
    end

    # @deprecated
-   #
    class Function < OpenAI::BaseModel
      # @!attribute name
      #   The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
@@ -520,7 +531,6 @@ class Function < OpenAI::BaseModel
    end

    # @abstract
-   #
    class Modality < OpenAI::Enum
      TEXT = :text
      AUDIO = :audio
@@ -553,6 +563,12 @@ class ResponseFormat < OpenAI::Union
      #   model will not generate JSON without a system or user message instructing it
      #   to do so.
      variant -> { OpenAI::Models::ResponseFormatJSONObject }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -588,6 +604,12 @@ class Stop < OpenAI::Union
      variant String

      variant OpenAI::Models::Chat::CompletionCreateParams::Stop::StringArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end

    class WebSearchOptions < OpenAI::BaseModel
diff --git a/lib/openai/models/chat_model.rb b/lib/openai/models/chat_model.rb
index 7b512490..29b0a851 100644
--- a/lib/openai/models/chat_model.rb
+++ b/lib/openai/models/chat_model.rb
@@ -3,7 +3,6 @@ module OpenAI
  module Models
    # @abstract
-   #
    class ChatModel < OpenAI::Enum
      O3_MINI = :"o3-mini"
      O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
diff --git a/lib/openai/models/comparison_filter.rb b/lib/openai/models/comparison_filter.rb
index 5e74b3b4..17be219d 100644
--- a/lib/openai/models/comparison_filter.rb
+++ b/lib/openai/models/comparison_filter.rb
@@ -72,6 +72,12 @@ class Value < OpenAI::Union
      variant Float

      variant OpenAI::BooleanModel
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Float, Boolean)]
+      #     def variants; end
+      #   end
    end
  end
end
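`ComparisonFilter#value` is a String | Float | Boolean union, so a numeric comparison can be built directly from the model; the constructor keywords below mirror the attributes defined on the class:

  filter = OpenAI::Models::ComparisonFilter.new(
    key: "price",
    type: :lte,   # comparison operator
    value: 42.0   # Float variant; a String or Boolean would also coerce
  )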
diff --git a/lib/openai/models/completion_create_params.rb b/lib/openai/models/completion_create_params.rb
index b2835980..c1c44876 100644
--- a/lib/openai/models/completion_create_params.rb
+++ b/lib/openai/models/completion_create_params.rb
@@ -14,7 +14,7 @@ class CompletionCreateParams < OpenAI::BaseModel
    #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
    #   them.
    #
-   #   @return [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset]
+   #   @return [String, Symbol]
    required :model, union: -> { OpenAI::Models::CompletionCreateParams::Model }

    # @!attribute prompt
@@ -183,7 +183,7 @@ class CompletionCreateParams < OpenAI::BaseModel
    #   attr_writer :user

    # @!parse
-   #   # @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset]
+   #   # @param model [String, Symbol]
    #   # @param prompt [String, Array, Array, Array>, nil]
    #   # @param best_of [Integer, nil]
    #   # @param echo [Boolean, nil]
@@ -236,25 +236,27 @@ class CompletionCreateParams < OpenAI::BaseModel
    #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
    #   them.
    class Model < OpenAI::Union
+     # @!group
+
+     GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct"
+     DAVINCI_002 = :"davinci-002"
+     BABBAGE_002 = :"babbage-002"
+
+     # @!endgroup
+
      variant String

-     # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
-     variant enum: -> { OpenAI::Models::CompletionCreateParams::Model::Preset }
-
-     # @abstract
-     #
-     # ID of the model to use. You can use the
-     #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-     #   see all of your available models, or see our
-     #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
-     #   them.
-     class Preset < OpenAI::Enum
-       GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct"
-       DAVINCI_002 = :"davinci-002"
-       BABBAGE_002 = :"babbage-002"
-
-       finalize!
-     end
+     variant const: OpenAI::Models::CompletionCreateParams::Model::GPT_3_5_TURBO_INSTRUCT
+
+     variant const: OpenAI::Models::CompletionCreateParams::Model::DAVINCI_002
+
+     variant const: OpenAI::Models::CompletionCreateParams::Model::BABBAGE_002
+
+     # @!parse
+     #   class << self
+     #     # @return [Array(String, Symbol)]
+     #     def variants; end
+     #   end
    end

    # @abstract
@@ -279,6 +281,12 @@ class Prompt < OpenAI::Union
      variant OpenAI::Models::CompletionCreateParams::Prompt::IntegerArray

      variant OpenAI::Models::CompletionCreateParams::Prompt::ArrayOfToken2DArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array, Array, Array>)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -291,6 +299,12 @@ class Stop < OpenAI::Union
      variant String

      variant OpenAI::Models::CompletionCreateParams::Stop::StringArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/compound_filter.rb b/lib/openai/models/compound_filter.rb
index 68d4e0a2..11452bfe 100644
--- a/lib/openai/models/compound_filter.rb
+++ b/lib/openai/models/compound_filter.rb
@@ -35,6 +35,12 @@ class Filter < OpenAI::Union
      variant -> { OpenAI::Models::ComparisonFilter }

      variant OpenAI::Unknown
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::ComparisonFilter, Object)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/embedding_create_params.rb b/lib/openai/models/embedding_create_params.rb
index 7bfe8e35..4d126d18 100644
--- a/lib/openai/models/embedding_create_params.rb
+++ b/lib/openai/models/embedding_create_params.rb
@@ -104,6 +104,12 @@ class Input < OpenAI::Union
      # The array of arrays containing integers that will be turned into an embedding.
      variant OpenAI::Models::EmbeddingCreateParams::Input::ArrayOfToken2DArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array, Array, Array>)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -118,6 +124,12 @@ class Model < OpenAI::Union
      # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
      variant enum: -> { OpenAI::Models::EmbeddingModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::EmbeddingModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/embedding_model.rb b/lib/openai/models/embedding_model.rb
index ae14fe32..65247fdf 100644
--- a/lib/openai/models/embedding_model.rb
+++ b/lib/openai/models/embedding_model.rb
@@ -3,7 +3,6 @@ module OpenAI
  module Models
    # @abstract
-   #
    class EmbeddingModel < OpenAI::Enum
      TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002"
      TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small"
diff --git a/lib/openai/models/file_chunking_strategy.rb b/lib/openai/models/file_chunking_strategy.rb
index 948ebefc..24c4dd7e 100644
--- a/lib/openai/models/file_chunking_strategy.rb
+++ b/lib/openai/models/file_chunking_strategy.rb
@@ -12,6 +12,12 @@ class FileChunkingStrategy < OpenAI::Union
      # This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.
      variant :other, -> { OpenAI::Models::OtherFileChunkingStrategyObject }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/file_chunking_strategy_param.rb b/lib/openai/models/file_chunking_strategy_param.rb
index 77ca2e6a..d9e6a634 100644
--- a/lib/openai/models/file_chunking_strategy_param.rb
+++ b/lib/openai/models/file_chunking_strategy_param.rb
@@ -14,6 +14,12 @@ class FileChunkingStrategyParam < OpenAI::Union
      # Customize your own chunking strategy by setting chunk size and chunk overlap.
      variant :static, -> { OpenAI::Models::StaticFileChunkingStrategyObjectParam }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam)]
+      #     def variants; end
+      #   end
    end
  end
end
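FileChunkingStrategyParam accepts either an auto or a static strategy hash, discriminated by `type`. A sketch; the `vector_stores.create` call shape is an assumption for illustration:

  client.vector_stores.create(
    name: "docs",
    chunking_strategy: {type: :auto}
  )

  # Static alternative:
  #   chunking_strategy: {type: :static, static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}}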
diff --git a/lib/openai/models/fine_tuning/fine_tuning_job.rb b/lib/openai/models/fine_tuning/fine_tuning_job.rb
index 86996d90..68876089 100644
--- a/lib/openai/models/fine_tuning/fine_tuning_job.rb
+++ b/lib/openai/models/fine_tuning/fine_tuning_job.rb
@@ -280,6 +280,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -290,6 +296,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -300,6 +312,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
@@ -449,6 +467,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -459,6 +483,12 @@ class Beta < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -469,6 +499,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -479,6 +515,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
@@ -560,6 +602,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -570,6 +618,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -580,6 +634,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/fine_tuning/job_create_params.rb b/lib/openai/models/fine_tuning/job_create_params.rb
index 926e7f82..6900ac01 100644
--- a/lib/openai/models/fine_tuning/job_create_params.rb
+++ b/lib/openai/models/fine_tuning/job_create_params.rb
@@ -12,7 +12,7 @@ class JobCreateParams < OpenAI::BaseModel
    #   The name of the model to fine-tune. You can select one of the
    #   [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
    #
-   #   @return [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset]
+   #   @return [String, Symbol]
    required :model, union: -> { OpenAI::Models::FineTuning::JobCreateParams::Model }

    # @!attribute training_file
@@ -113,7 +113,7 @@ class JobCreateParams < OpenAI::BaseModel
    optional :validation_file, String, nil?: true

    # @!parse
-   #   # @param model [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset]
+   #   # @param model [String, Symbol]
    #   # @param training_file [String]
    #   # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters]
    #   # @param integrations [Array, nil]
@@ -147,28 +147,33 @@ class JobCreateParams < OpenAI::BaseModel
    #   The name of the model to fine-tune. You can select one of the
    #   [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
    class Model < OpenAI::Union
+     # @!group
+
+     BABBAGE_002 = :"babbage-002"
+     DAVINCI_002 = :"davinci-002"
+     GPT_3_5_TURBO = :"gpt-3.5-turbo"
+     GPT_4O_MINI = :"gpt-4o-mini"
+
+     # @!endgroup
+
      variant String

-     # The name of the model to fine-tune. You can select one of the
-     # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-     variant enum: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::Preset }
+     variant const: OpenAI::Models::FineTuning::JobCreateParams::Model::BABBAGE_002

-     # @abstract
-     #
-     # The name of the model to fine-tune. You can select one of the
-     # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-     class Preset < OpenAI::Enum
-       BABBAGE_002 = :"babbage-002"
-       DAVINCI_002 = :"davinci-002"
-       GPT_3_5_TURBO = :"gpt-3.5-turbo"
-       GPT_4O_MINI = :"gpt-4o-mini"
+     variant const: OpenAI::Models::FineTuning::JobCreateParams::Model::DAVINCI_002

-       finalize!
-     end
+     variant const: OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_3_5_TURBO
+
+     variant const: OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_4O_MINI
+
+     # @!parse
+     #   class << self
+     #     # @return [Array(String, Symbol)]
+     #     def variants; end
+     #   end
    end

    # @deprecated
-   #
    class Hyperparameters < OpenAI::BaseModel
      # @!attribute [r] batch_size
      #   Number of examples in each batch. A larger batch size means that model
@@ -225,6 +230,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -235,6 +246,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -245,6 +262,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
@@ -456,6 +479,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -466,6 +495,12 @@ class Beta < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -476,6 +511,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -486,6 +527,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
@@ -567,6 +614,12 @@ class BatchSize < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -577,6 +630,12 @@ class LearningRateMultiplier < OpenAI::Union
      variant const: :auto

      variant Float
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Float)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -587,6 +646,12 @@ class NEpochs < OpenAI::Union
      variant const: :auto

      variant Integer
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, :auto, Integer)]
+      #     def variants; end
+      #   end
    end
  end
end
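The hyperparameter unions take either the `:auto` constant or a number, and the Model union takes a string or one of the constants added above. A sketch; the `jobs.create` call shape is assumed:

  client.fine_tuning.jobs.create(
    model: OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_4O_MINI,
    training_file: "file-abc123",
    hyperparameters: {n_epochs: :auto, batch_size: 8}
  )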
diff --git a/lib/openai/models/image_create_variation_params.rb b/lib/openai/models/image_create_variation_params.rb
index df16e7e4..d4546f63 100644
--- a/lib/openai/models/image_create_variation_params.rb
+++ b/lib/openai/models/image_create_variation_params.rb
@@ -79,6 +79,12 @@ class Model < OpenAI::Union
      # The model to use for image generation. Only `dall-e-2` is supported at this time.
      variant enum: -> { OpenAI::Models::ImageModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb
index 87317727..14c8b8cf 100644
--- a/lib/openai/models/image_edit_params.rb
+++ b/lib/openai/models/image_edit_params.rb
@@ -110,6 +110,12 @@ class Model < OpenAI::Union
      # The model to use for image generation. Only `dall-e-2` is supported at this time.
      variant enum: -> { OpenAI::Models::ImageModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/image_generate_params.rb b/lib/openai/models/image_generate_params.rb
index 9f1f00e2..577665bd 100644
--- a/lib/openai/models/image_generate_params.rb
+++ b/lib/openai/models/image_generate_params.rb
@@ -112,6 +112,12 @@ class Model < OpenAI::Union
      # The model to use for image generation.
      variant enum: -> { OpenAI::Models::ImageModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/image_model.rb b/lib/openai/models/image_model.rb
index c9c62780..e49e6699 100644
--- a/lib/openai/models/image_model.rb
+++ b/lib/openai/models/image_model.rb
@@ -3,7 +3,6 @@ module OpenAI
  module Models
    # @abstract
-   #
    class ImageModel < OpenAI::Enum
      DALL_E_2 = :"dall-e-2"
      DALL_E_3 = :"dall-e-3"
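The image-model unions accept a plain string or an ImageModel constant. A sketch; the `images.generate` call shape is an assumption for illustration:

  client.images.generate(
    model: OpenAI::Models::ImageModel::DALL_E_3,  # or simply "dall-e-3"
    prompt: "A watercolor lighthouse at dawn"
  )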
diff --git a/lib/openai/models/moderation.rb b/lib/openai/models/moderation.rb
index 98a25176..0f3c5a90 100644
--- a/lib/openai/models/moderation.rb
+++ b/lib/openai/models/moderation.rb
@@ -310,7 +310,6 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel
      #   def initialize: (Hash | OpenAI::BaseModel) -> void

      # @abstract
-     #
      class Harassment < OpenAI::Enum
        TEXT = :text
@@ -318,7 +317,6 @@ class Harassment < OpenAI::Enum
      end

      # @abstract
-     #
      class HarassmentThreatening < OpenAI::Enum
        TEXT = :text
@@ -326,7 +324,6 @@ class HarassmentThreatening < OpenAI::Enum
      end

      # @abstract
-     #
      class Hate < OpenAI::Enum
        TEXT = :text
@@ -334,7 +331,6 @@ class Hate < OpenAI::Enum
      end

      # @abstract
-     #
      class HateThreatening < OpenAI::Enum
        TEXT = :text
@@ -342,7 +338,6 @@ class HateThreatening < OpenAI::Enum
      end

      # @abstract
-     #
      class Illicit < OpenAI::Enum
        TEXT = :text
@@ -350,7 +345,6 @@ class Illicit < OpenAI::Enum
      end

      # @abstract
-     #
      class IllicitViolent < OpenAI::Enum
        TEXT = :text
@@ -358,7 +352,6 @@ class IllicitViolent < OpenAI::Enum
      end

      # @abstract
-     #
      class SelfHarm < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
@@ -367,7 +360,6 @@ class SelfHarm < OpenAI::Enum
      end

      # @abstract
-     #
      class SelfHarmInstruction < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
@@ -376,7 +368,6 @@ class SelfHarmInstruction < OpenAI::Enum
      end

      # @abstract
-     #
      class SelfHarmIntent < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
@@ -385,7 +376,6 @@ class SelfHarmIntent < OpenAI::Enum
      end

      # @abstract
-     #
      class Sexual < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
@@ -394,7 +384,6 @@ class Sexual < OpenAI::Enum
      end

      # @abstract
-     #
      class SexualMinor < OpenAI::Enum
        TEXT = :text
@@ -402,7 +391,6 @@ class SexualMinor < OpenAI::Enum
      end

      # @abstract
-     #
      class Violence < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
@@ -411,7 +399,6 @@ class Violence < OpenAI::Enum
      end

      # @abstract
-     #
      class ViolenceGraphic < OpenAI::Enum
        TEXT = :text
        IMAGE = :image
diff --git a/lib/openai/models/moderation_create_params.rb b/lib/openai/models/moderation_create_params.rb
index 17a49eb2..e0789618 100644
--- a/lib/openai/models/moderation_create_params.rb
+++ b/lib/openai/models/moderation_create_params.rb
@@ -53,6 +53,12 @@ class Input < OpenAI::Union
      # An array of multi-modal inputs to the moderation model.
      variant OpenAI::Models::ModerationCreateParams::Input::ModerationMultiModalInputArray
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array, Array)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -68,6 +74,12 @@ class Model < OpenAI::Union
      #   [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
      #   available models [here](https://platform.openai.com/docs/models#moderation).
      variant enum: -> { OpenAI::Models::ModerationModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ModerationModel)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/moderation_model.rb b/lib/openai/models/moderation_model.rb
index 2abe4a13..4089ad86 100644
--- a/lib/openai/models/moderation_model.rb
+++ b/lib/openai/models/moderation_model.rb
@@ -3,7 +3,6 @@ module OpenAI
  module Models
    # @abstract
-   #
    class ModerationModel < OpenAI::Enum
      OMNI_MODERATION_LATEST = :"omni-moderation-latest"
      OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26"
diff --git a/lib/openai/models/moderation_multi_modal_input.rb b/lib/openai/models/moderation_multi_modal_input.rb
index d5f91171..47271a66 100644
--- a/lib/openai/models/moderation_multi_modal_input.rb
+++ b/lib/openai/models/moderation_multi_modal_input.rb
@@ -13,6 +13,12 @@ class ModerationMultiModalInput < OpenAI::Union
      # An object describing text to classify.
      variant :text, -> { OpenAI::Models::ModerationTextInput }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)]
+      #     def variants; end
+      #   end
    end
  end
end
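Moderation input is a union of a string, an array of strings, and an array of multi-modal inputs. A sketch; the `moderations.create` call shape is assumed:

  client.moderations.create(input: "some text to classify")

  # Multi-modal variant, discriminated by each element's type:
  client.moderations.create(
    model: OpenAI::Models::ModerationModel::OMNI_MODERATION_LATEST,
    input: [
      {type: :text, text: "caption"},
      {type: :image_url, image_url: {url: "https://example.com/a.png"}}
    ]
  )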
diff --git a/lib/openai/models/responses/easy_input_message.rb b/lib/openai/models/responses/easy_input_message.rb
index 280bc258..e4effc89 100644
--- a/lib/openai/models/responses/easy_input_message.rb
+++ b/lib/openai/models/responses/easy_input_message.rb
@@ -54,6 +54,12 @@ class Content < OpenAI::Union
      # A list of one or many input items to the model, containing different content
      # types.
      variant -> { OpenAI::Models::Responses::ResponseInputMessageContentList }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/responses/file_search_tool.rb b/lib/openai/models/responses/file_search_tool.rb
index 5c138fa3..c2ba4177 100644
--- a/lib/openai/models/responses/file_search_tool.rb
+++ b/lib/openai/models/responses/file_search_tool.rb
@@ -71,6 +71,12 @@ class Filters < OpenAI::Union
      # Combine multiple filters using `and` or `or`.
      variant -> { OpenAI::Models::CompoundFilter }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)]
+      #     def variants; end
+      #   end
    end

    class RankingOptions < OpenAI::BaseModel
diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb
index 4ff08aa8..98b2c145 100644
--- a/lib/openai/models/responses/response.rb
+++ b/lib/openai/models/responses/response.rb
@@ -315,6 +315,12 @@ class Model < OpenAI::Union
      #   characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
      #   to browse and compare available models.
      variant enum: -> { OpenAI::Models::ChatModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -339,6 +345,12 @@ class ToolChoice < OpenAI::Union
      # Use this option to force the model to call a specific function.
      variant -> { OpenAI::Models::Responses::ToolChoiceFunction }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/responses/response_code_interpreter_tool_call.rb b/lib/openai/models/responses/response_code_interpreter_tool_call.rb
index 25181e47..e93e62cf 100644
--- a/lib/openai/models/responses/response_code_interpreter_tool_call.rb
+++ b/lib/openai/models/responses/response_code_interpreter_tool_call.rb
@@ -129,6 +129,12 @@ class File < OpenAI::BaseModel
        #   def initialize: (Hash | OpenAI::BaseModel) -> void
      end
    end
+
+    # @!parse
+    #   class << self
+    #     # @return [Array(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files)]
+    #     def variants; end
+    #   end
  end

  # @abstract
diff --git a/lib/openai/models/responses/response_computer_tool_call.rb b/lib/openai/models/responses/response_computer_tool_call.rb
index 268cd184..20bd85ae 100644
--- a/lib/openai/models/responses/response_computer_tool_call.rb
+++ b/lib/openai/models/responses/response_computer_tool_call.rb
@@ -399,6 +399,12 @@ class Wait < OpenAI::BaseModel
      #   def initialize: (Hash | OpenAI::BaseModel) -> void
    end
+
+    # @!parse
+    #   class << self
+    #     # @return [Array(OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait)]
+    #     def variants; end
+    #   end
  end

  class PendingSafetyCheck < OpenAI::BaseModel
diff --git a/lib/openai/models/responses/response_content.rb b/lib/openai/models/responses/response_content.rb
index 379aaed8..72456bac 100644
--- a/lib/openai/models/responses/response_content.rb
+++ b/lib/openai/models/responses/response_content.rb
@@ -21,6 +21,12 @@ class ResponseContent < OpenAI::Union
      # A refusal from the model.
      variant -> { OpenAI::Models::Responses::ResponseOutputRefusal }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/responses/response_content_part_added_event.rb b/lib/openai/models/responses/response_content_part_added_event.rb
index 094f7f60..1fa8be8b 100644
--- a/lib/openai/models/responses/response_content_part_added_event.rb
+++ b/lib/openai/models/responses/response_content_part_added_event.rb
@@ -58,6 +58,12 @@ class Part < OpenAI::Union
      # A refusal from the model.
      variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/responses/response_content_part_done_event.rb b/lib/openai/models/responses/response_content_part_done_event.rb
index 33a8cedb..0911d697 100644
--- a/lib/openai/models/responses/response_content_part_done_event.rb
+++ b/lib/openai/models/responses/response_content_part_done_event.rb
@@ -58,6 +58,12 @@ class Part < OpenAI::Union
      # A refusal from the model.
      variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
+      #     def variants; end
+      #   end
    end
  end
end
diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb
index 94db6d99..4c6c485b 100644
--- a/lib/openai/models/responses/response_create_params.rb
+++ b/lib/openai/models/responses/response_create_params.rb
@@ -261,6 +261,12 @@ class Input < OpenAI::Union
      # A list of one or many input items to the model, containing
      # different content types.
      variant -> { OpenAI::Models::Responses::ResponseInput }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Array)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -278,6 +284,12 @@ class Model < OpenAI::Union
      #   characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
      #   to browse and compare available models.
      variant enum: -> { OpenAI::Models::ChatModel }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
+      #     def variants; end
+      #   end
    end

    # @abstract
@@ -302,6 +314,12 @@ class ToolChoice < OpenAI::Union
      # Use this option to force the model to call a specific function.
      variant -> { OpenAI::Models::Responses::ToolChoiceFunction }
+
+      # @!parse
+      #   class << self
+      #     # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+      #     def variants; end
+      #   end
    end

    # @abstract
diff --git a/lib/openai/models/responses/response_file_search_tool_call.rb b/lib/openai/models/responses/response_file_search_tool_call.rb
index df500ca6..8054ec60 100644
--- a/lib/openai/models/responses/response_file_search_tool_call.rb
+++ b/lib/openai/models/responses/response_file_search_tool_call.rb
@@ -131,13 +131,18 @@ class Result < OpenAI::BaseModel
      #   def initialize: (Hash | OpenAI::BaseModel) -> void

      # @abstract
-     #
      class Attribute < OpenAI::Union
        variant String

        variant Float

        variant OpenAI::BooleanModel
+
+        # @!parse
+        #   class << self
+        #     # @return [Array(String, Float, Boolean)]
+        #     def variants; end
+        #   end
      end
    end
  end
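`ResponseCreateParams::Input` accepts a bare string or an array of input items, and Model again takes the String-or-ChatModel union. A sketch of both input forms on the Responses API:

  client.responses.create(model: "gpt-4o", input: "Say hello.")

  client.responses.create(
    model: "gpt-4o",
    input: [{role: :user, content: "Say hello."}]
  )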
variant :json_object, -> { OpenAI::Models::ResponseFormatJSONObject } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_input_content.rb b/lib/openai/models/responses/response_input_content.rb index 1ce3a1ca..8d6226d4 100644 --- a/lib/openai/models/responses/response_input_content.rb +++ b/lib/openai/models/responses/response_input_content.rb @@ -17,6 +17,12 @@ class ResponseInputContent < OpenAI::Union # A file input to the model. variant :input_file, -> { OpenAI::Models::Responses::ResponseInputFile } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb index c94a979c..cd5d395b 100644 --- a/lib/openai/models/responses/response_input_item.rb +++ b/lib/openai/models/responses/response_input_item.rb @@ -379,6 +379,12 @@ class ItemReference < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_item_list.rb b/lib/openai/models/responses/response_item_list.rb index 595ec411..7890dc3a 100644 --- a/lib/openai/models/responses/response_item_list.rb +++ b/lib/openai/models/responses/response_item_list.rb @@ -370,6 +370,12 @@ class Status < OpenAI::Enum finalize! end end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseItemList::Data::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_output_item.rb b/lib/openai/models/responses/response_output_item.rb index 2a71d21e..2ed933fd 100644 --- a/lib/openai/models/responses/response_output_item.rb +++ b/lib/openai/models/responses/response_output_item.rb @@ -31,6 +31,12 @@ class ResponseOutputItem < OpenAI::Union # A description of the chain of thought used by a reasoning model while generating # a response. 
variant :reasoning, -> { OpenAI::Models::Responses::ResponseReasoningItem } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_output_message.rb b/lib/openai/models/responses/response_output_message.rb index 7fcb4c6a..1dee0300 100644 --- a/lib/openai/models/responses/response_output_message.rb +++ b/lib/openai/models/responses/response_output_message.rb @@ -60,6 +60,12 @@ class Content < OpenAI::Union # A refusal from the model. variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] + # def variants; end + # end end # @abstract diff --git a/lib/openai/models/responses/response_output_text.rb b/lib/openai/models/responses/response_output_text.rb index 8e83fef4..175d5eda 100644 --- a/lib/openai/models/responses/response_output_text.rb +++ b/lib/openai/models/responses/response_output_text.rb @@ -155,6 +155,12 @@ class FilePath < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_stream_event.rb b/lib/openai/models/responses/response_stream_event.rb index 571c3d7a..85fe7a25 100644 --- a/lib/openai/models/responses/response_stream_event.rb +++ b/lib/openai/models/responses/response_stream_event.rb @@ -120,6 +120,12 @@ class ResponseStreamEvent < OpenAI::Union # Emitted when a web search call is executing. 
variant :"response.web_search_call.searching", -> { OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/response_text_annotation_delta_event.rb b/lib/openai/models/responses/response_text_annotation_delta_event.rb index 27041443..4d980266 100644 --- a/lib/openai/models/responses/response_text_annotation_delta_event.rb +++ b/lib/openai/models/responses/response_text_annotation_delta_event.rb @@ -189,6 +189,12 @@ class FilePath < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void end + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath)] + # def variants; end + # end end end end diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index a97afcb0..9093989e 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -26,6 +26,12 @@ class Tool < OpenAI::Union # This tool searches the web for relevant results to use in a response. # Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). 
variant -> { OpenAI::Models::Responses::WebSearchTool } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::WebSearchTool)] + # def variants; end + # end end end end diff --git a/lib/openai/models/vector_store_search_params.rb b/lib/openai/models/vector_store_search_params.rb index ff451753..30d37581 100644 --- a/lib/openai/models/vector_store_search_params.rb +++ b/lib/openai/models/vector_store_search_params.rb @@ -85,6 +85,12 @@ class Query < OpenAI::Union variant String variant OpenAI::Models::VectorStoreSearchParams::Query::StringArray + + # @!parse + # class << self + # # @return [Array(String, Array)] + # def variants; end + # end end # @abstract @@ -96,6 +102,12 @@ class Filters < OpenAI::Union # Combine multiple filters using `and` or `or`. variant -> { OpenAI::Models::CompoundFilter } + + # @!parse + # class << self + # # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)] + # def variants; end + # end end class RankingOptions < OpenAI::BaseModel @@ -128,7 +140,6 @@ class RankingOptions < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Ranker < OpenAI::Enum AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" diff --git a/lib/openai/models/vector_store_search_response.rb b/lib/openai/models/vector_store_search_response.rb index aa09a30a..233cdad0 100644 --- a/lib/openai/models/vector_store_search_response.rb +++ b/lib/openai/models/vector_store_search_response.rb @@ -51,13 +51,18 @@ class VectorStoreSearchResponse < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Attribute < OpenAI::Union variant String variant Float variant OpenAI::BooleanModel + + # @!parse + # class << self + # # @return [Array(String, Float, Boolean)] + # def variants; end + # end end class Content < OpenAI::BaseModel diff --git a/lib/openai/models/vector_stores/file_batch_create_params.rb b/lib/openai/models/vector_stores/file_batch_create_params.rb index f5859e61..e35057be 100644 --- a/lib/openai/models/vector_stores/file_batch_create_params.rb +++ b/lib/openai/models/vector_stores/file_batch_create_params.rb @@ -50,13 +50,18 @@ class FileBatchCreateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Attribute < OpenAI::Union variant String variant Float variant OpenAI::BooleanModel + + # @!parse + # class << self + # # @return [Array(String, Float, Boolean)] + # def variants; end + # end end end end diff --git a/lib/openai/models/vector_stores/file_create_params.rb b/lib/openai/models/vector_stores/file_create_params.rb index c91648aa..3e7c7817 100644 --- a/lib/openai/models/vector_stores/file_create_params.rb +++ b/lib/openai/models/vector_stores/file_create_params.rb @@ -50,13 +50,18 @@ class FileCreateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Attribute < OpenAI::Union variant String variant Float variant OpenAI::BooleanModel + + # @!parse + # class << self + # # @return [Array(String, Float, Boolean)] + # def variants; end + # end end end end diff --git a/lib/openai/models/vector_stores/file_update_params.rb b/lib/openai/models/vector_stores/file_update_params.rb index 047e821f..6b9100d4 100644 --- a/lib/openai/models/vector_stores/file_update_params.rb +++ b/lib/openai/models/vector_stores/file_update_params.rb @@ -35,13 
+35,18 @@ class FileUpdateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Attribute < OpenAI::Union variant String variant Float variant OpenAI::BooleanModel + + # @!parse + # class << self + # # @return [Array(String, Float, Boolean)] + # def variants; end + # end end end end diff --git a/lib/openai/models/vector_stores/vector_store_file.rb b/lib/openai/models/vector_stores/vector_store_file.rb index 27d1234a..5aa034c1 100644 --- a/lib/openai/models/vector_stores/vector_store_file.rb +++ b/lib/openai/models/vector_stores/vector_store_file.rb @@ -156,13 +156,18 @@ class Status < OpenAI::Enum end # @abstract - # class Attribute < OpenAI::Union variant String variant Float variant OpenAI::BooleanModel + + # @!parse + # class << self + # # @return [Array(String, Float, Boolean)] + # def variants; end + # end end end end diff --git a/lib/openai/page.rb b/lib/openai/page.rb index fa3bd198..40ccc49a 100644 --- a/lib/openai/page.rb +++ b/lib/openai/page.rb @@ -17,26 +17,31 @@ module OpenAI # # @example # ```ruby - # models = page.to_enum.take(2) + # models = page + # .to_enum + # .lazy + # .select { _1.object_id.even? } + # .map(&:itself) + # .take(2) + # .to_a # # models => Array # ``` class Page include OpenAI::BasePage - # @return [Array] + # @return [Array, nil] attr_accessor :data # @return [String] attr_accessor :object - # @private + # @api private # # @param client [OpenAI::BaseClient] # @param req [Hash{Symbol=>Object}] # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param page_data [Array] - # def initialize(client:, req:, headers:, page_data:) super model = req.fetch(:model) @@ -48,7 +53,7 @@ def initialize(client:, req:, headers:, page_data:) end case page_data - in {object: String | nil => object} + in {object: String => object} @object = object else end @@ -61,13 +66,11 @@ def next_page? # @raise [OpenAI::HTTP::Error] # @return [OpenAI::Page] - # def next_page RuntimeError.new("No more pages available.") end # @param blk [Proc] - # def auto_paging_each(&blk) unless block_given? raise ArgumentError.new("A block must be given to ##{__method__}") @@ -81,7 +84,6 @@ def auto_paging_each(&blk) end # @return [String] - # def inspect "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} object=#{object.inspect}>" end diff --git a/lib/openai/pooled_net_requester.rb b/lib/openai/pooled_net_requester.rb index d1a15ffe..4b0ae742 100644 --- a/lib/openai/pooled_net_requester.rb +++ b/lib/openai/pooled_net_requester.rb @@ -1,16 +1,14 @@ # frozen_string_literal: true module OpenAI - # @private - # + # @api private class PooledNetRequester class << self - # @private + # @api private # # @param url [URI::Generic] # # @return [Net::HTTP] - # def connect(url) port = case [url.port, url.scheme] @@ -28,17 +26,16 @@ def connect(url) end end - # @private + # @api private # # @param conn [Net::HTTP] # @param deadline [Float] - # def calibrate_socket_timeout(conn, deadline) timeout = deadline - OpenAI::Util.monotonic_secs conn.open_timeout = conn.read_timeout = conn.write_timeout = conn.continue_timeout = timeout end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . 
# @@ -51,7 +48,6 @@ def calibrate_socket_timeout(conn, deadline) # @param blk [Proc] # # @return [Net::HTTPGenericRequest] - # def build_request(request, &) method, url, headers, body = request.fetch_values(:method, :url, :headers, :body) req = Net::HTTPGenericRequest.new( @@ -65,6 +61,7 @@ def build_request(request, &) case body in nil + nil in String req["content-length"] ||= body.bytesize.to_s unless req["transfer-encoding"] req.body_stream = OpenAI::Util::ReadIOAdapter.new(body, &) @@ -80,13 +77,14 @@ def build_request(request, &) end end - # @private + # @api private # # @param url [URI::Generic] + # @param deadline [Float] # @param blk [Proc] - # - private def with_pool(url, &) + private def with_pool(url, deadline:, &blk) origin = OpenAI::Util.uri_origin(url) + timeout = deadline - OpenAI::Util.monotonic_secs pool = @mutex.synchronize do @pools[origin] ||= ConnectionPool.new(size: @size) do @@ -94,10 +92,10 @@ def build_request(request, &) end end - pool.with(&) + pool.with(timeout: timeout, &blk) end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . # @@ -111,15 +109,14 @@ def build_request(request, &) # # @option request [Float] :deadline # - # @return [Array(Net::HTTPResponse, Enumerable)] - # + # @return [Array(Integer, Net::HTTPResponse, Enumerable)] def execute(request) url, deadline = request.fetch_values(:url, :deadline) eof = false finished = false enum = Enumerator.new do |y| - with_pool(url) do |conn| + with_pool(url, deadline: deadline) do |conn| next if finished req = self.class.build_request(request) do @@ -131,7 +128,7 @@ def execute(request) self.class.calibrate_socket_timeout(conn, deadline) conn.request(req) do |rsp| - y << [conn, rsp] + y << [conn, req, rsp] break if finished rsp.read_body do |bytes| @@ -143,9 +140,11 @@ def execute(request) eof = true end end + rescue Timeout::Error + raise OpenAI::APITimeoutError end - conn, response = enum.next + conn, _, response = enum.next body = OpenAI::Util.fused_enum(enum, external: true) do finished = true tap do @@ -155,13 +154,12 @@ def execute(request) end conn.finish if !eof && conn&.started? end - [response, (response.body = body)] + [Integer(response.code), response, (response.body = body)] end - # @private + # @api private # # @param size [Integer] - # def initialize(size: Etc.nprocessors) @mutex = Mutex.new @size = size diff --git a/lib/openai/request_options.rb b/lib/openai/request_options.rb index befdf96f..a3245591 100644 --- a/lib/openai/request_options.rb +++ b/lib/openai/request_options.rb @@ -1,10 +1,7 @@ # frozen_string_literal: true module OpenAI - # @private - # - # @abstract - # + # @api private module RequestParameters # @!parse # # Options to specify HTTP behaviour for this request. @@ -12,7 +9,6 @@ module RequestParameters # attr_accessor :request_options # @param mod [Module] - # def self.included(mod) return unless mod <= OpenAI::BaseModel @@ -20,15 +16,13 @@ def self.included(mod) mod.optional(:request_options, OpenAI::RequestOptions) end - # @private - # + # @api private module Converter - # @private + # @api private # # @param params [Object] # # @return [Array(Object, Hash{Symbol=>Object})] - # def dump_request(params) case (dumped = dump(params)) in Hash @@ -46,12 +40,11 @@ def dump_request(params) # When making a request, you can pass an actual {RequestOptions} instance, or # simply pass a Hash with symbol keys matching the attributes on this class. 
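To make that comment concrete, both call shapes below should be equivalent. This is a sketch: the `timeout` attribute name, the client construction, and the `models.list` call are illustrative assumptions rather than part of this patch.

```ruby
# Assumes OPENAI_API_KEY is set in the environment, and that `timeout` is an
# attribute on OpenAI::RequestOptions (hypothetical here).
client = OpenAI::Client.new

# A plain Hash with symbol keys matching the RequestOptions attributes...
client.models.list(request_options: {timeout: 30})

# ...or an actual RequestOptions instance.
client.models.list(request_options: OpenAI::RequestOptions.new(timeout: 30))
```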
class RequestOptions < OpenAI::BaseModel - # @private + # @api private # # @param opts [OpenAI::RequestOptions, Hash{Symbol=>Object}] # # @raise [ArgumentError] - # def self.validate!(opts) case opts in OpenAI::RequestOptions | Hash diff --git a/lib/openai/resources/audio.rb b/lib/openai/resources/audio.rb index db698a4b..e82c41f4 100644 --- a/lib/openai/resources/audio.rb +++ b/lib/openai/resources/audio.rb @@ -13,7 +13,6 @@ class Audio attr_reader :speech # @param client [OpenAI::Client] - # def initialize(client:) @client = client @transcriptions = OpenAI::Resources::Audio::Transcriptions.new(client: client) diff --git a/lib/openai/resources/audio/speech.rb b/lib/openai/resources/audio/speech.rb index dce52041..9c5d8284 100644 --- a/lib/openai/resources/audio/speech.rb +++ b/lib/openai/resources/audio/speech.rb @@ -27,7 +27,6 @@ class Speech # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [Object] - # def create(params) parsed, options = OpenAI::Models::Audio::SpeechCreateParams.dump_request(params) @client.request( @@ -41,7 +40,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/transcriptions.rb b/lib/openai/resources/audio/transcriptions.rb index 4c158734..9e291700 100644 --- a/lib/openai/resources/audio/transcriptions.rb +++ b/lib/openai/resources/audio/transcriptions.rb @@ -41,7 +41,6 @@ class Transcriptions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose] - # def create(params) parsed, options = OpenAI::Models::Audio::TranscriptionCreateParams.dump_request(params) @client.request( @@ -55,7 +54,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/translations.rb b/lib/openai/resources/audio/translations.rb index 4e1431c1..c1de4f8e 100644 --- a/lib/openai/resources/audio/translations.rb +++ b/lib/openai/resources/audio/translations.rb @@ -31,7 +31,6 @@ class Translations # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose] - # def create(params) parsed, options = OpenAI::Models::Audio::TranslationCreateParams.dump_request(params) @client.request( @@ -45,7 +44,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/batches.rb b/lib/openai/resources/batches.rb index 751d6548..8f0799eb 100644 --- a/lib/openai/resources/batches.rb +++ b/lib/openai/resources/batches.rb @@ -35,7 +35,6 @@ class Batches # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def create(params) parsed, options = OpenAI::Models::BatchCreateParams.dump_request(params) @client.request( @@ -56,7 +55,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def retrieve(batch_id, params = {}) @client.request( method: :get, @@ -81,7 +79,6 @@ def retrieve(batch_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = 
OpenAI::Models::BatchListParams.dump_request(params) @client.request( @@ -105,7 +102,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def cancel(batch_id, params = {}) @client.request( method: :post, @@ -116,7 +112,6 @@ def cancel(batch_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta.rb b/lib/openai/resources/beta.rb index 4eee364b..c1b7273a 100644 --- a/lib/openai/resources/beta.rb +++ b/lib/openai/resources/beta.rb @@ -10,7 +10,6 @@ class Beta attr_reader :threads # @param client [OpenAI::Client] - # def initialize(client:) @client = client @assistants = OpenAI::Resources::Beta::Assistants.new(client: client) diff --git a/lib/openai/resources/beta/assistants.rb b/lib/openai/resources/beta/assistants.rb index 45d0010d..8fbb01b9 100644 --- a/lib/openai/resources/beta/assistants.rb +++ b/lib/openai/resources/beta/assistants.rb @@ -78,7 +78,6 @@ class Assistants # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def create(params) parsed, options = OpenAI::Models::Beta::AssistantCreateParams.dump_request(params) @client.request( @@ -99,7 +98,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def retrieve(assistant_id, params = {}) @client.request( method: :get, @@ -127,7 +125,7 @@ def retrieve(assistant_id, params = {}) # Keys are strings with a maximum length of 64 characters. Values are strings with # a maximum length of 512 characters. # - # @option params [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels] :model ID of the model to use. You can use the + # @option params [String, Symbol] :model ID of the model to use. 
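The `:model` documentation here drops the generated enum reference in favor of plain `[String, Symbol]`. As a sketch, either spelling satisfies the documented type; the model IDs and the `assistant_id` variable are placeholders, not values from this patch.

```ruby
# Both forms match the documented String | Symbol type for :model.
client.beta.assistants.update(assistant_id, model: "gpt-4o")   # hypothetical ID
client.beta.assistants.update(assistant_id, model: :"gpt-4o")  # hypothetical ID
```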
You can use the # [List models](https://platform.openai.com/docs/api-reference/models/list) API to # see all of your available models, or see our # [Model overview](https://platform.openai.com/docs/models) for descriptions of @@ -185,7 +183,6 @@ def retrieve(assistant_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def update(assistant_id, params = {}) parsed, options = OpenAI::Models::Beta::AssistantUpdateParams.dump_request(params) @client.request( @@ -220,7 +217,6 @@ def update(assistant_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::Beta::AssistantListParams.dump_request(params) @client.request( @@ -242,7 +238,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::AssistantDeleted] - # def delete(assistant_id, params = {}) @client.request( method: :delete, @@ -253,7 +248,6 @@ def delete(assistant_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads.rb b/lib/openai/resources/beta/threads.rb index b7e58a7c..d8f2e660 100644 --- a/lib/openai/resources/beta/threads.rb +++ b/lib/openai/resources/beta/threads.rb @@ -32,7 +32,6 @@ class Threads # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def create(params = {}) parsed, options = OpenAI::Models::Beta::ThreadCreateParams.dump_request(params) @client.request( @@ -53,7 +52,6 @@ def create(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def retrieve(thread_id, params = {}) @client.request( method: :get, @@ -84,7 +82,6 @@ def retrieve(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def update(thread_id, params = {}) parsed, options = OpenAI::Models::Beta::ThreadUpdateParams.dump_request(params) @client.request( @@ -105,7 +102,6 @@ def update(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::ThreadDeleted] - # def delete(thread_id, params = {}) @client.request( method: :delete, @@ -210,7 +206,6 @@ def delete(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def create_and_run(params) parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) parsed.delete(:stream) @@ -318,7 +313,6 @@ def create_and_run(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_and_run_streaming(params) parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) parsed.store(:stream, true) @@ -334,7 +328,6 @@ def create_and_run_streaming(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @runs = OpenAI::Resources::Beta::Threads::Runs.new(client: client) diff --git a/lib/openai/resources/beta/threads/messages.rb b/lib/openai/resources/beta/threads/messages.rb index 
3ea2f318..4e5141de 100644 --- a/lib/openai/resources/beta/threads/messages.rb +++ b/lib/openai/resources/beta/threads/messages.rb @@ -33,7 +33,6 @@ class Messages # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def create(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageCreateParams.dump_request(params) @client.request( @@ -57,7 +56,6 @@ def create(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def retrieve(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -89,7 +87,6 @@ def retrieve(message_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def update(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageUpdateParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -132,7 +129,6 @@ def update(message_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(thread_id, params = {}) parsed, options = OpenAI::Models::Beta::Threads::MessageListParams.dump_request(params) @client.request( @@ -156,7 +152,6 @@ def list(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::MessageDeleted] - # def delete(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageDeleteParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -171,7 +166,6 @@ def delete(message_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb index 34f408d7..2f3b3b12 100644 --- a/lib/openai/resources/beta/threads/runs.rb +++ b/lib/openai/resources/beta/threads/runs.rb @@ -123,7 +123,6 @@ class Runs # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def create(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) parsed.delete(:stream) @@ -253,7 +252,6 @@ def create(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) parsed.store(:stream, true) @@ -282,7 +280,6 @@ def create_streaming(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def retrieve(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -315,7 +312,6 @@ def retrieve(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def update(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunUpdateParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -355,7 +351,6 @@ def 
update(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(thread_id, params = {}) parsed, options = OpenAI::Models::Beta::Threads::RunListParams.dump_request(params) @client.request( @@ -379,7 +374,6 @@ def list(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def cancel(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCancelParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -411,7 +405,6 @@ def cancel(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def submit_tool_outputs(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) parsed.delete(:stream) @@ -445,7 +438,6 @@ def submit_tool_outputs(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def submit_tool_outputs_streaming(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) parsed.store(:stream, true) @@ -464,7 +456,6 @@ def submit_tool_outputs_streaming(run_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @steps = OpenAI::Resources::Beta::Threads::Runs::Steps.new(client: client) diff --git a/lib/openai/resources/beta/threads/runs/steps.rb b/lib/openai/resources/beta/threads/runs/steps.rb index e44fb9a5..d5c4ddb3 100644 --- a/lib/openai/resources/beta/threads/runs/steps.rb +++ b/lib/openai/resources/beta/threads/runs/steps.rb @@ -28,7 +28,6 @@ class Steps # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - # def retrieve(step_id, params) parsed, options = OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -82,7 +81,6 @@ def retrieve(step_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::Runs::StepListParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -99,7 +97,6 @@ def list(run_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/chat.rb b/lib/openai/resources/chat.rb index 8945e202..d5bf1e2e 100644 --- a/lib/openai/resources/chat.rb +++ b/lib/openai/resources/chat.rb @@ -7,7 +7,6 @@ class Chat attr_reader :completions # @param client [OpenAI::Client] - # def initialize(client:) @client = client @completions = OpenAI::Resources::Chat::Completions.new(client: client) diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 7be5f0ae..94e76ede 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -213,7 +213,6 @@ class Completions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def create(params) parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params) parsed.delete(:stream) @@ -432,7 +431,6 @@ def create(params) # 
@option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params) parsed.store(:stream, true) @@ -457,7 +455,6 @@ def create_streaming(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def retrieve(completion_id, params = {}) @client.request( method: :get, @@ -485,7 +482,6 @@ def retrieve(completion_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def update(completion_id, params) parsed, options = OpenAI::Models::Chat::CompletionUpdateParams.dump_request(params) @client.request( @@ -518,7 +514,6 @@ def update(completion_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::Chat::CompletionListParams.dump_request(params) @client.request( @@ -541,7 +536,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletionDeleted] - # def delete(completion_id, params = {}) @client.request( method: :delete, @@ -552,7 +546,6 @@ def delete(completion_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @messages = OpenAI::Resources::Chat::Completions::Messages.new(client: client) diff --git a/lib/openai/resources/chat/completions/messages.rb b/lib/openai/resources/chat/completions/messages.rb index 3bc5880d..decc122d 100644 --- a/lib/openai/resources/chat/completions/messages.rb +++ b/lib/openai/resources/chat/completions/messages.rb @@ -22,7 +22,6 @@ class Messages # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(completion_id, params = {}) parsed, options = OpenAI::Models::Chat::Completions::MessageListParams.dump_request(params) @client.request( @@ -36,7 +35,6 @@ def list(completion_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/completions.rb b/lib/openai/resources/completions.rb index 1a6dcb98..79cc6805 100644 --- a/lib/openai/resources/completions.rb +++ b/lib/openai/resources/completions.rb @@ -7,7 +7,7 @@ class Completions # # @param params [OpenAI::Models::CompletionCreateParams, Hash{Symbol=>Object}] . # - # @option params [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] :model ID of the model to use. You can use the + # @option params [String, Symbol] :model ID of the model to use. You can use the # [List models](https://platform.openai.com/docs/api-reference/models/list) API to # see all of your available models, or see our # [Model overview](https://platform.openai.com/docs/models) for descriptions of @@ -113,7 +113,6 @@ class Completions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Completion] - # def create(params) parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params) parsed.delete(:stream) @@ -130,7 +129,7 @@ def create(params) # # @param params [OpenAI::Models::CompletionCreateParams, Hash{Symbol=>Object}] . 
# - # @option params [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] :model ID of the model to use. You can use the + # @option params [String, Symbol] :model ID of the model to use. You can use the # [List models](https://platform.openai.com/docs/api-reference/models/list) API to # see all of your available models, or see our # [Model overview](https://platform.openai.com/docs/models) for descriptions of @@ -236,7 +235,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params) parsed.store(:stream, true) @@ -252,7 +250,6 @@ def create_streaming(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/embeddings.rb b/lib/openai/resources/embeddings.rb index b5e6eda1..b70c1ef5 100644 --- a/lib/openai/resources/embeddings.rb +++ b/lib/openai/resources/embeddings.rb @@ -35,7 +35,6 @@ class Embeddings # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::CreateEmbeddingResponse] - # def create(params) parsed, options = OpenAI::Models::EmbeddingCreateParams.dump_request(params) @client.request( @@ -48,7 +47,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/files.rb b/lib/openai/resources/files.rb index 2164c02b..191387e3 100644 --- a/lib/openai/resources/files.rb +++ b/lib/openai/resources/files.rb @@ -37,7 +37,6 @@ class Files # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileObject] - # def create(params) parsed, options = OpenAI::Models::FileCreateParams.dump_request(params) @client.request( @@ -59,7 +58,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileObject] - # def retrieve(file_id, params = {}) @client.request( method: :get, @@ -89,7 +87,6 @@ def retrieve(file_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::FileListParams.dump_request(params) @client.request( @@ -111,7 +108,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileDeleted] - # def delete(file_id, params = {}) @client.request( method: :delete, @@ -130,7 +126,6 @@ def delete(file_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [Object] - # def content(file_id, params = {}) @client.request( method: :get, @@ -142,7 +137,6 @@ def content(file_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/fine_tuning.rb b/lib/openai/resources/fine_tuning.rb index e7a161e4..61663e79 100644 --- a/lib/openai/resources/fine_tuning.rb +++ b/lib/openai/resources/fine_tuning.rb @@ -7,7 +7,6 @@ class FineTuning attr_reader :jobs # @param client [OpenAI::Client] - # def initialize(client:) @client = client @jobs = OpenAI::Resources::FineTuning::Jobs.new(client: client) diff --git a/lib/openai/resources/fine_tuning/jobs.rb b/lib/openai/resources/fine_tuning/jobs.rb index 
bcfa238e..9d00777f 100644 --- a/lib/openai/resources/fine_tuning/jobs.rb +++ b/lib/openai/resources/fine_tuning/jobs.rb @@ -17,7 +17,7 @@ class Jobs # # @param params [OpenAI::Models::FineTuning::JobCreateParams, Hash{Symbol=>Object}] . # - # @option params [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset] :model The name of the model to fine-tune. You can select one of the + # @option params [String, Symbol] :model The name of the model to fine-tune. You can select one of the # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). # # @option params [String] :training_file The ID of an uploaded file that contains training data. @@ -78,7 +78,6 @@ class Jobs # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def create(params) parsed, options = OpenAI::Models::FineTuning::JobCreateParams.dump_request(params) @client.request( @@ -101,7 +100,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def retrieve(fine_tuning_job_id, params = {}) @client.request( method: :get, @@ -125,7 +123,6 @@ def retrieve(fine_tuning_job_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::FineTuning::JobListParams.dump_request(params) @client.request( @@ -147,7 +144,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def cancel(fine_tuning_job_id, params = {}) @client.request( method: :post, @@ -170,7 +166,6 @@ def cancel(fine_tuning_job_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list_events(fine_tuning_job_id, params = {}) parsed, options = OpenAI::Models::FineTuning::JobListEventsParams.dump_request(params) @client.request( @@ -184,7 +179,6 @@ def list_events(fine_tuning_job_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @checkpoints = OpenAI::Resources::FineTuning::Jobs::Checkpoints.new(client: client) diff --git a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb index e9f2d303..cb4b3c18 100644 --- a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb +++ b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb @@ -18,7 +18,6 @@ class Checkpoints # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(fine_tuning_job_id, params = {}) parsed, options = OpenAI::Models::FineTuning::Jobs::CheckpointListParams.dump_request(params) @client.request( @@ -32,7 +31,6 @@ def list(fine_tuning_job_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb index 8cc42136..48b2ac87 100644 --- a/lib/openai/resources/images.rb +++ b/lib/openai/resources/images.rb @@ -30,7 +30,6 @@ class Images # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def create_variation(params) parsed, options = 
OpenAI::Models::ImageCreateVariationParams.dump_request(params) @client.request( @@ -76,7 +75,6 @@ def create_variation(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def edit(params) parsed, options = OpenAI::Models::ImageEditParams.dump_request(params) @client.request( @@ -125,7 +123,6 @@ def edit(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def generate(params) parsed, options = OpenAI::Models::ImageGenerateParams.dump_request(params) @client.request( @@ -138,7 +135,6 @@ def generate(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/models.rb b/lib/openai/resources/models.rb index 9c086955..cff4bf06 100644 --- a/lib/openai/resources/models.rb +++ b/lib/openai/resources/models.rb @@ -13,7 +13,6 @@ class Models # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Model] - # def retrieve(model, params = {}) @client.request( method: :get, @@ -31,7 +30,6 @@ def retrieve(model, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def list(params = {}) @client.request( method: :get, @@ -52,7 +50,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ModelDeleted] - # def delete(model, params = {}) @client.request( method: :delete, @@ -63,7 +60,6 @@ def delete(model, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/moderations.rb b/lib/openai/resources/moderations.rb index f1375d48..85d8cd8f 100644 --- a/lib/openai/resources/moderations.rb +++ b/lib/openai/resources/moderations.rb @@ -19,7 +19,6 @@ class Moderations # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ModerationCreateResponse] - # def create(params) parsed, options = OpenAI::Models::ModerationCreateParams.dump_request(params) @client.request( @@ -32,7 +31,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 3b4c2c2d..fdbe46bc 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -127,7 +127,6 @@ class Responses # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Responses::Response] - # def create(params) parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) parsed.delete(:stream) @@ -261,7 +260,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) parsed.store(:stream, true) @@ -288,7 +286,6 @@ def create_streaming(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Responses::Response] - # def retrieve(response_id, params = {}) parsed, options = OpenAI::Models::Responses::ResponseRetrieveParams.dump_request(params) @client.request( @@ -309,7 +306,6 @@ 
def retrieve(response_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [nil] - # def delete(response_id, params = {}) @client.request( method: :delete, @@ -320,7 +316,6 @@ def delete(response_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @input_items = OpenAI::Resources::Responses::InputItems.new(client: client) diff --git a/lib/openai/resources/responses/input_items.rb b/lib/openai/resources/responses/input_items.rb index 87ea88ab..1fe57b71 100644 --- a/lib/openai/resources/responses/input_items.rb +++ b/lib/openai/resources/responses/input_items.rb @@ -25,7 +25,6 @@ class InputItems # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(response_id, params = {}) parsed, options = OpenAI::Models::Responses::InputItemListParams.dump_request(params) @client.request( @@ -39,7 +38,6 @@ def list(response_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/uploads.rb b/lib/openai/resources/uploads.rb index e7a40175..f72e4255 100644 --- a/lib/openai/resources/uploads.rb +++ b/lib/openai/resources/uploads.rb @@ -45,7 +45,6 @@ class Uploads # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def create(params) parsed, options = OpenAI::Models::UploadCreateParams.dump_request(params) @client.request( @@ -66,7 +65,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def cancel(upload_id, params = {}) @client.request( method: :post, @@ -102,7 +100,6 @@ def cancel(upload_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def complete(upload_id, params) parsed, options = OpenAI::Models::UploadCompleteParams.dump_request(params) @client.request( @@ -115,7 +112,6 @@ def complete(upload_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @parts = OpenAI::Resources::Uploads::Parts.new(client: client) diff --git a/lib/openai/resources/uploads/parts.rb b/lib/openai/resources/uploads/parts.rb index c6f90707..b90eef15 100644 --- a/lib/openai/resources/uploads/parts.rb +++ b/lib/openai/resources/uploads/parts.rb @@ -25,7 +25,6 @@ class Parts # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Uploads::UploadPart] - # def create(upload_id, params) parsed, options = OpenAI::Models::Uploads::PartCreateParams.dump_request(params) @client.request( @@ -39,7 +38,6 @@ def create(upload_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores.rb b/lib/openai/resources/vector_stores.rb index 18803daa..e60a1592 100644 --- a/lib/openai/resources/vector_stores.rb +++ b/lib/openai/resources/vector_stores.rb @@ -34,7 +34,6 @@ class VectorStores # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def create(params = {}) parsed, options = OpenAI::Models::VectorStoreCreateParams.dump_request(params) @client.request( @@ -55,7 +54,6 @@ def create(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, 
nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def retrieve(vector_store_id, params = {}) @client.request( method: :get, @@ -85,7 +83,6 @@ def retrieve(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def update(vector_store_id, params = {}) parsed, options = OpenAI::Models::VectorStoreUpdateParams.dump_request(params) @client.request( @@ -120,7 +117,6 @@ def update(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::VectorStoreListParams.dump_request(params) @client.request( @@ -142,7 +138,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStoreDeleted] - # def delete(vector_store_id, params = {}) @client.request( method: :delete, @@ -173,7 +168,6 @@ def delete(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def search(vector_store_id, params) parsed, options = OpenAI::Models::VectorStoreSearchParams.dump_request(params) @client.request( @@ -187,7 +181,6 @@ def search(vector_store_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @files = OpenAI::Resources::VectorStores::Files.new(client: client) diff --git a/lib/openai/resources/vector_stores/file_batches.rb b/lib/openai/resources/vector_stores/file_batches.rb index 3c484618..7893fd17 100644 --- a/lib/openai/resources/vector_stores/file_batches.rb +++ b/lib/openai/resources/vector_stores/file_batches.rb @@ -26,7 +26,6 @@ class FileBatches # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def create(vector_store_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchCreateParams.dump_request(params) @client.request( @@ -49,7 +48,6 @@ def create(vector_store_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def retrieve(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchRetrieveParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -75,7 +73,6 @@ def retrieve(batch_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def cancel(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchCancelParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -119,7 +116,6 @@ def cancel(batch_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list_files(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchListFilesParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -136,7 +132,6 @@ def list_files(batch_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores/files.rb b/lib/openai/resources/vector_stores/files.rb index 23eafaa3..0c9ae2a5 100644 --- 
a/lib/openai/resources/vector_stores/files.rb +++ b/lib/openai/resources/vector_stores/files.rb @@ -28,7 +28,6 @@ class Files # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def create(vector_store_id, params) parsed, options = OpenAI::Models::VectorStores::FileCreateParams.dump_request(params) @client.request( @@ -51,7 +50,6 @@ def create(vector_store_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def retrieve(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileRetrieveParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -82,7 +80,6 @@ def retrieve(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def update(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileUpdateParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -124,7 +121,6 @@ def update(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(vector_store_id, params = {}) parsed, options = OpenAI::Models::VectorStores::FileListParams.dump_request(params) @client.request( @@ -151,7 +147,6 @@ def list(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileDeleted] - # def delete(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileDeleteParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -176,7 +171,6 @@ def delete(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def content(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileContentParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -192,7 +186,6 @@ def content(file_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/stream.rb b/lib/openai/stream.rb index f9319992..2d448ef0 100644 --- a/lib/openai/stream.rb +++ b/lib/openai/stream.rb @@ -1,28 +1,31 @@ # frozen_string_literal: true module OpenAI - # @private - # # @example # ```ruby - # stream.for_each do |message| - # puts(message) + # stream.for_each do |event| + # puts(event) # end # ``` # # @example # ```ruby - # messages = stream.to_enum.take(2) + # events = stream + # .to_enum + # .lazy + # .select { _1.object_id.even? 
} + # .map(&:itself) + # .take(2) + # .to_a # - # messages => Array + # events => Array # ``` class Stream include OpenAI::BaseStream - # @private + # @api private # # @return [Enumerable] - # private def iterator # rubocop:disable Metrics/BlockLength @iterator ||= OpenAI::Util.chain_fused(@messages) do |y| diff --git a/lib/openai/util.rb b/lib/openai/util.rb index 10368046..ef208d81 100644 --- a/lib/openai/util.rb +++ b/lib/openai/util.rb @@ -3,20 +3,17 @@ module OpenAI # rubocop:disable Metrics/ModuleLength - # @private - # + # @api private module Util - # @private + # @api private # # @return [Float] - # def self.monotonic_secs = Process.clock_gettime(Process::CLOCK_MONOTONIC) class << self - # @private + # @api private # # @return [String] - # def arch case (arch = RbConfig::CONFIG["arch"])&.downcase in nil @@ -32,10 +29,9 @@ def arch end end - # @private + # @api private # # @return [String] - # def os case (host = RbConfig::CONFIG["host_os"])&.downcase in nil @@ -57,12 +53,11 @@ def os end class << self - # @private + # @api private # # @param input [Object] # - # @return [Boolean, Object] - # + # @return [Boolean] def primitive?(input) case input in true | false | Integer | Float | Symbol | String @@ -72,12 +67,11 @@ def primitive?(input) end end - # @private + # @api private # # @param input [Object] # # @return [Boolean, Object] - # def coerce_boolean(input) case input.is_a?(String) ? input.downcase : input in Numeric @@ -91,13 +85,12 @@ def coerce_boolean(input) end end - # @private + # @api private # # @param input [Object] # # @raise [ArgumentError] # @return [Boolean, nil] - # def coerce_boolean!(input) case coerce_boolean(input) in true | false | nil => coerced @@ -107,12 +100,11 @@ def coerce_boolean!(input) end end - # @private + # @api private # # @param input [Object] # # @return [Integer, Object] - # def coerce_integer(input) case input in true @@ -124,12 +116,11 @@ def coerce_integer(input) end end - # @private + # @api private # # @param input [Object] # # @return [Float, Object] - # def coerce_float(input) case input in true @@ -141,12 +132,11 @@ def coerce_float(input) end end - # @private + # @api private # # @param input [Object] # # @return [Hash{Object=>Object}, Object] - # def coerce_hash(input) case input in NilClass | Array | Set | Enumerator @@ -165,14 +155,13 @@ def coerce_hash(input) OMIT = Object.new.freeze class << self - # @private + # @api private # # @param lhs [Object] # @param rhs [Object] # @param concat [Boolean] # # @return [Object] - # private def deep_merge_lr(lhs, rhs, concat: false) case [lhs, rhs, concat] in [Hash, Hash, _] @@ -191,7 +180,7 @@ class << self end end - # @private + # @api private # # Recursively merge one hash with another. If the values at a given key are not # both hashes, just take the new value. @@ -203,7 +192,6 @@ class << self # @param concat [Boolean] whether to merge sequences by concatenation. 
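A worked sketch of the `deep_merge` semantics documented here: hashes merge recursively, the newer value wins when the values at a key are not both hashes, and `concat: true` merges sequences by concatenation. The literal inputs below are illustrative.

```ruby
lhs = {a: {b: 1, c: [1]}, d: "old"}
rhs = {a: {c: [2]}, d: "new"}

# Non-hash values are simply replaced by the newer side.
OpenAI::Util.deep_merge(lhs, rhs)               # => {a: {b: 1, c: [2]}, d: "new"}

# Sequences are merged by concatenation when concat: true.
OpenAI::Util.deep_merge(lhs, rhs, concat: true) # => {a: {b: 1, c: [1, 2]}, d: "new"}
```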
# # @return [Object] - # def deep_merge(*values, sentinel: nil, concat: false) case values in [value, *values] @@ -215,7 +203,7 @@ def deep_merge(*values, sentinel: nil, concat: false) end end - # @private + # @api private # # @param data [Hash{Symbol=>Object}, Array, Object] # @param pick [Symbol, Integer, Array, nil] @@ -223,7 +211,6 @@ def deep_merge(*values, sentinel: nil, concat: false) # @param blk [Proc, nil] # # @return [Object, nil] - # def dig(data, pick, sentinel = nil, &blk) case [data, pick, blk] in [_, nil, nil] @@ -248,22 +235,20 @@ def dig(data, pick, sentinel = nil, &blk) end class << self - # @private + # @api private # # @param uri [URI::Generic] # # @return [String] - # def uri_origin(uri) "#{uri.scheme}://#{uri.host}#{uri.port == uri.default_port ? '' : ":#{uri.port}"}" end - # @private + # @api private # # @param path [String, Array] # # @return [String] - # def interpolate_path(path) case path in String @@ -278,40 +263,37 @@ def interpolate_path(path) end class << self - # @private + # @api private # # @param query [String, nil] # # @return [Hash{String=>Array}] - # def decode_query(query) CGI.parse(query.to_s) end - # @private + # @api private # # @param query [Hash{String=>Array, String, nil}, nil] # # @return [String, nil] - # def encode_query(query) query.to_h.empty? ? nil : URI.encode_www_form(query) end end class << self - # @private + # @api private # # @param url [URI::Generic, String] # # @return [Hash{Symbol=>String, Integer, nil}] - # def parse_uri(url) parsed = URI::Generic.component.zip(URI.split(url)).to_h {**parsed, query: decode_query(parsed.fetch(:query))} end - # @private + # @api private # # @param parsed [Hash{Symbol=>String, Integer, nil}] . # @@ -326,12 +308,11 @@ def parse_uri(url) # @option parsed [Hash{String=>Array}] :query # # @return [URI::Generic] - # def unparse_uri(parsed) URI::Generic.build(**parsed, query: encode_query(parsed.fetch(:query))) end - # @private + # @api private # # @param lhs [Hash{Symbol=>String, Integer, nil}] . # @@ -358,7 +339,6 @@ def unparse_uri(parsed) # @option rhs [Hash{String=>Array}] :query # # @return [URI::Generic] - # def join_parsed_uri(lhs, rhs) base_path, base_query = lhs.fetch_values(:path, :query) slashed = base_path.end_with?("/") ? base_path : "#{base_path}/" @@ -380,12 +360,11 @@ def join_parsed_uri(lhs, rhs) end class << self - # @private + # @api private # # @param headers [Hash{String=>String, Integer, Array, nil}] # # @return [Hash{String=>String}] - # def normalized_headers(*headers) {}.merge(*headers.compact).to_h do |key, val| case val @@ -399,16 +378,15 @@ def normalized_headers(*headers) end end - # @private + # @api private # # An adapter that satisfies the IO interface required by `::IO.copy_stream` class ReadIOAdapter - # @private + # @api private # # @param max_len [Integer, nil] # # @return [String] - # private def read_enum(max_len) case max_len in nil @@ -422,13 +400,12 @@ class ReadIOAdapter @buf.slice!(0..) end - # @private + # @api private # # @param max_len [Integer, nil] # @param out_string [String, nil] # # @return [String, nil] - # def read(max_len = nil, out_string = nil) case @stream in nil @@ -447,11 +424,10 @@ def read(max_len = nil, out_string = nil) .tap(&@blk) end - # @private + # @api private # # @param stream [String, IO, StringIO, Enumerable] # @param blk [Proc] - # def initialize(stream, &blk) @stream = stream.is_a?(String) ? 
StringIO.new(stream) : stream @buf = String.new.b @@ -463,7 +439,6 @@ class << self # @param blk [Proc] # # @return [Enumerable] - # def string_io(&blk) Enumerator.new do |y| y.define_singleton_method(:write) do @@ -477,13 +452,12 @@ def string_io(&blk) end class << self - # @private + # @api private # # @param y [Enumerator::Yielder] # @param boundary [String] # @param key [Symbol, String] # @param val [Object] - # private def encode_multipart_formdata(y, boundary:, key:, val:) y << "--#{boundary}\r\n" y << "Content-Disposition: form-data" @@ -516,12 +490,11 @@ class << self y << "\r\n" end - # @private + # @api private # # @param body [Object] # # @return [Array(String, Enumerable)] - # private def encode_multipart_streaming(body) boundary = SecureRandom.urlsafe_base64(60) @@ -547,18 +520,19 @@ class << self [boundary, strio] end - # @private + # @api private # # @param headers [Hash{String=>String}] # @param body [Object] # # @return [Object] - # def encode_content(headers, body) content_type = headers["content-type"] case [content_type, body] - in ["application/json", Hash | Array] + in [%r{^application/(?:vnd\.api\+)?json}, Hash | Array] [headers, JSON.fast_generate(body)] + in [%r{^application/(?:x-)?jsonl}, Enumerable] + [headers, body.lazy.map { JSON.fast_generate(_1) }] in [%r{^multipart/form-data}, Hash | IO | StringIO] boundary, strio = encode_multipart_streaming(body) headers = {**headers, "content-type" => "#{content_type}; boundary=#{boundary}"} @@ -570,7 +544,7 @@ def encode_content(headers, body) end end - # @private + # @api private # # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param stream [Enumerable] @@ -578,7 +552,6 @@ def encode_content(headers, body) # # @raise [JSON::ParserError] # @return [Object] - # def decode_content(headers, stream:, suppress_error: false) case headers["content-type"] in %r{^application/(?:vnd\.api\+)?json} @@ -589,11 +562,14 @@ def decode_content(headers, stream:, suppress_error: false) raise e unless suppress_error json end + in %r{^application/(?:x-)?jsonl} + lines = decode_lines(stream) + chain_fused(lines) do |y| + lines.each { y << JSON.parse(_1, symbolize_names: true) } + end in %r{^text/event-stream} lines = decode_lines(stream) decode_sse(lines) - in %r{^application/(?:x-)?jsonl} - decode_lines(stream) in %r{^text/} stream.to_a.join else @@ -604,7 +580,7 @@ def decode_content(headers, stream:, suppress_error: false) end class << self - # @private + # @api private # # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html # @@ -613,7 +589,6 @@ class << self # @param close [Proc] # # @return [Enumerable] - # def fused_enum(enum, external: false, &close) fused = false iter = Enumerator.new do |y| @@ -637,10 +612,9 @@ def fused_enum(enum, external: false, &close) iter end - # @private + # @api private # # @param enum [Enumerable, nil] - # def close_fused!(enum) return unless enum.is_a?(Enumerator) @@ -649,11 +623,12 @@ def close_fused!(enum) # rubocop:enable Lint/UnreachableLoop end - # @private + # @api private # # @param enum [Enumerable, nil] # @param blk [Proc] # + # @return [Enumerable] def chain_fused(enum, &blk) iter = Enumerator.new { blk.call(_1) } fused_enum(iter) { close_fused!(enum) } @@ -661,12 +636,11 @@ def chain_fused(enum, &blk) end class << self - # @private + # @api private # # @param enum [Enumerable] # # @return [Enumerable] - # def decode_lines(enum) re = /(\r\n|\r|\n)/ buffer = String.new.b @@ -696,14 +670,13 @@ def decode_lines(enum) end end - # @private + # @api private # # 
https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream # # @param lines [Enumerable] # # @return [Hash{Symbol=>Object}] - # def decode_sse(lines) # rubocop:disable Metrics/BlockLength chain_fused(lines) do |y| diff --git a/lib/openai/version.rb b/lib/openai/version.rb index bcab79ff..8175d17f 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.1.0-alpha.1" + VERSION = "0.1.0-alpha.2" end diff --git a/rbi/lib/openai/base_client.rbi b/rbi/lib/openai/base_client.rbi index fd80c3c5..e4208ab7 100644 --- a/rbi/lib/openai/base_client.rbi +++ b/rbi/lib/openai/base_client.rbi @@ -1,6 +1,7 @@ # typed: strong module OpenAI + # @api private class BaseClient abstract! @@ -44,16 +45,19 @@ module OpenAI PLATFORM_HEADERS = T::Hash[String, String] class << self + # @api private sig { params(req: OpenAI::BaseClient::RequestComponentsShape).void } def validate!(req) end + # @api private sig do params(status: Integer, headers: T.any(T::Hash[String, String], Net::HTTPHeader)).returns(T::Boolean) end def should_retry?(status, headers:) end + # @api private sig do params( request: OpenAI::BaseClient::RequestInputShape, @@ -64,6 +68,16 @@ module OpenAI end def follow_redirect(request, status:, response_headers:) end + + # @api private + sig do + params( + status: T.any(Integer, OpenAI::APIConnectionError), + stream: T.nilable(T::Enumerable[String]) + ).void + end + def reap_connection!(status, stream:) + end end sig { returns(T.anything) } @@ -74,6 +88,7 @@ module OpenAI def requester=(_) end + # @api private sig do params( base_url: String, @@ -98,14 +113,17 @@ module OpenAI ) end + # @api private sig { overridable.returns(T::Hash[String, String]) } private def auth_headers end + # @api private sig { returns(String) } private def generate_idempotency_key end + # @api private sig do overridable .params(req: OpenAI::BaseClient::RequestComponentsShape, opts: T::Hash[Symbol, T.anything]) @@ -114,10 +132,12 @@ module OpenAI private def build_request(req, opts) end + # @api private sig { params(headers: T::Hash[String, String], retry_count: Integer).returns(Float) } private def retry_delay(headers, retry_count:) end + # @api private sig do params( request: OpenAI::BaseClient::RequestInputShape, @@ -130,6 +150,8 @@ module OpenAI private def send_request(request, redirect_count:, retry_count:, send_retry_header:) end + # Execute the request specified by `req`. This is the method that all resource + # methods call into. sig do params( method: Symbol, diff --git a/rbi/lib/openai/base_model.rbi b/rbi/lib/openai/base_model.rbi index f64d87e2..8840d3a8 100644 --- a/rbi/lib/openai/base_model.rbi +++ b/rbi/lib/openai/base_model.rbi @@ -1,19 +1,21 @@ # typed: strong module OpenAI + # @api private module Converter - abstract! - Input = T.type_alias { T.any(OpenAI::Converter, T::Class[T.anything]) } + # @api private sig { overridable.params(value: T.anything).returns(T.anything) } def coerce(value) end + # @api private sig { overridable.params(value: T.anything).returns(T.anything) } def dump(value) end + # @api private sig do overridable .params(value: T.anything) @@ -23,6 +25,7 @@ module OpenAI end class << self + # @api private sig do params( spec: T.any( @@ -40,43 +43,69 @@ module OpenAI def self.type_info(spec) end + # @api private + # + # Based on `target`, transform `value` into `target`, to the extent possible: + # + # 1. if the given `value` conforms to `target` already, return the given `value` + # 2. 
if it's possible and safe to convert the given `value` to `target`, then the + # converted value + # 3. otherwise, the given `value` unaltered sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.coerce(target, value) end + # @api private sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.dump(target, value) end + # @api private + # + # The underlying algorithm for computing maximal compatibility is subject to + # future improvements. + # + # Similar to `#.coerce`, used to determine the best union variant to decode into. + # + # 1. determine if strict-ish coercion is possible + # 2. return either result of successful coercion or if loose coercion is possible + # 3. return a score for recursively tallied count for fields that can be coerced sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.try_strict_coerce(target, value) end end end + # @api private + # + # When we don't know what to expect for the value. class Unknown - abstract! - extend OpenAI::Converter - sig { params(other: T.anything).returns(T::Boolean) } + abstract! + final! + + sig(:final) { params(other: T.anything).returns(T::Boolean) } def self.===(other) end - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def self.==(other) end class << self - sig { override.params(value: T.anything).returns(T.anything) } + # @api private + sig(:final) { override.params(value: T.anything).returns(T.anything) } def coerce(value) end - sig { override.params(value: T.anything).returns(T.anything) } + # @api private + sig(:final) { override.params(value: T.anything).returns(T.anything) } def dump(value) end - sig do + # @api private + sig(:final) do override .params(value: T.anything) .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) @@ -86,29 +115,40 @@ module OpenAI end end + # @api private + # + # Ruby has no Boolean class; this is something for models to refer to. class BooleanModel - abstract! - extend OpenAI::Converter - sig { params(other: T.anything).returns(T::Boolean) } + abstract! + final! + + sig(:final) { params(other: T.anything).returns(T::Boolean) } def self.===(other) end - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def self.==(other) end class << self - sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } + # @api private + sig(:final) do + override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) + end def coerce(value) end - sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } + # @api private + sig(:final) do + override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) + end def dump(value) end - sig do + # @api private + sig(:final) do override .params(value: T.anything) .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) @@ -118,16 +158,34 @@ module OpenAI end end + # @api private + # + # A value from among a specified list of options. OpenAPI enum values map to Ruby + # values in the SDK as follows: + # + # 1. boolean => true | false + # 2. integer => Integer + # 3. float => Float + # 4. string => Symbol + # + # We can therefore convert string values to Symbols, but can't convert other + # values safely. 
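Editor's note: the enum value mapping documented above implies that only string-typed enum values can be coerced safely; booleans, integers, and floats must already match. A minimal sketch of that rule (the helper name is hypothetical, not part of the SDK):

```ruby
# Hypothetical helper illustrating the documented enum coercion rule:
# strings become Symbols, everything else passes through unchanged.
def coerce_enum_value(value)
  value.is_a?(String) ? value.to_sym : value
end

coerce_enum_value("mp3")  # => :mp3
coerce_enum_value(0.25)   # => 0.25 (non-string values cannot be converted safely)
coerce_enum_value(true)   # => true
```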
class Enum + extend OpenAI::Converter + abstract! - extend OpenAI::Converter + Value = type_template(:out) class << self - sig { overridable.returns(T::Array[T.any(NilClass, T::Boolean, Integer, Float, Symbol)]) } + # All of the valid Symbol values for this enum. + sig { overridable.returns(T::Array[Value]) } def values end + # @api private + # + # Guard against thread safety issues by instantiating `@values`. sig { void } private def finalize! end @@ -142,14 +200,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.any(String, Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } def coerce(value) end + # @api private sig { override.params(value: T.any(Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -160,44 +221,50 @@ module OpenAI end end + # @api private class Union + extend OpenAI::Converter + abstract! - extend OpenAI::Converter + Variants = type_template(:out) class << self - sig { returns(T::Array[[T.nilable(Symbol), Proc]]) } + # @api private + # + # All of the specified variant info for this union. + sig { returns(T::Array[[T.nilable(Symbol), T.proc.returns(Variants)]]) } private def known_variants end - sig { overridable.returns(T::Array[[T.nilable(Symbol), T.anything]]) } - protected def variants + # @api private + sig { returns(T::Array[[T.nilable(Symbol), Variants]]) } + protected def derefed_variants end + # All of the specified variants for this union. + sig { overridable.returns(T::Array[Variants]) } + def variants + end + + # @api private sig { params(property: Symbol).void } private def discriminator(property) end + # @api private sig do params( - key: T.any( - Symbol, - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ) + key: T.any(Symbol, T::Hash[Symbol, T.anything], T.proc.returns(Variants), Variants), + spec: T.any(T::Hash[Symbol, T.anything], T.proc.returns(Variants), Variants) ) .void end private def variant(key, spec = nil) end - sig { params(value: T.anything).returns(T.nilable(OpenAI::Converter::Input)) } + # @api private + sig { params(value: T.anything).returns(T.nilable(Variants)) } private def resolve_variant(value) end end @@ -211,14 +278,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.anything).returns(T.anything) } def coerce(value) end + # @api private sig { override.params(value: T.anything).returns(T.anything) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -229,20 +299,27 @@ module OpenAI end end + # @api private + # + # Array of items of a given type. class ArrayOf + include OpenAI::Converter + abstract! + final! 
- include OpenAI::Converter + Elem = type_member(:out) - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def ===(other) end - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def ==(other) end - sig do + # @api private + sig(:final) do override .params(value: T.any(T::Enumerable[T.anything], T.anything)) .returns(T.any(T::Array[T.anything], T.anything)) @@ -250,7 +327,8 @@ module OpenAI def coerce(value) end - sig do + # @api private + sig(:final) do override .params(value: T.any(T::Enumerable[T.anything], T.anything)) .returns(T.any(T::Array[T.anything], T.anything)) @@ -258,7 +336,8 @@ module OpenAI def dump(value) end - sig do + # @api private + sig(:final) do override .params(value: T.anything) .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) @@ -266,11 +345,13 @@ module OpenAI def try_strict_coerce(value) end - sig { returns(OpenAI::Converter::Input) } + # @api private + sig(:final) { returns(Elem) } protected def item_type end - sig do + # @api private + sig(:final) do params( type_info: T.any( T::Hash[Symbol, T.anything], @@ -285,20 +366,27 @@ module OpenAI end end + # @api private + # + # Hash of items of a given type. class HashOf + include OpenAI::Converter + abstract! + final! - include OpenAI::Converter + Elem = type_member(:out) - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def ===(other) end - sig { params(other: T.anything).returns(T::Boolean) } + sig(:final) { params(other: T.anything).returns(T::Boolean) } def ==(other) end - sig do + # @api private + sig(:final) do override .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) .returns(T.any(T::Hash[Symbol, T.anything], T.anything)) @@ -306,7 +394,8 @@ module OpenAI def coerce(value) end - sig do + # @api private + sig(:final) do override .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) .returns(T.any(T::Hash[Symbol, T.anything], T.anything)) @@ -314,7 +403,8 @@ module OpenAI def dump(value) end - sig do + # @api private + sig(:final) do override .params(value: T.anything) .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) @@ -322,11 +412,13 @@ module OpenAI def try_strict_coerce(value) end - sig { returns(OpenAI::Converter::Input) } + # @api private + sig(:final) { returns(Elem) } protected def item_type end - sig do + # @api private + sig(:final) do params( type_info: T.any( T::Hash[Symbol, T.anything], @@ -342,13 +434,17 @@ module OpenAI end class BaseModel - abstract! - extend OpenAI::Converter + abstract! + KnownFieldShape = T.type_alias { {mode: T.nilable(Symbol), required: T::Boolean} } class << self + # @api private + # + # Assumes superclass fields are totally defined before fields are accessed / + # defined on subclasses. 
sig do returns( T::Hash[Symbol, @@ -361,16 +457,19 @@ module OpenAI def known_fields end + # @api private sig do returns(T::Hash[Symbol, T.all(OpenAI::BaseModel::KnownFieldShape, {type: OpenAI::Converter::Input})]) end def fields end + # @api private sig { returns(T::Hash[Symbol, T.proc.returns(T::Class[T.anything])]) } def defaults end + # @api private sig do params( name_sym: Symbol, @@ -393,6 +492,7 @@ module OpenAI private def add_field(name_sym, required:, type_info:, spec:) end + # @api private sig do params( name_sym: Symbol, @@ -408,6 +508,7 @@ module OpenAI def required(name_sym, type_info, spec = {}) end + # @api private sig do params( name_sym: Symbol, @@ -423,10 +524,17 @@ module OpenAI def optional(name_sym, type_info, spec = {}) end + # @api private + # + # `request_only` attributes not excluded from `.#coerce` when receiving responses + # even if well behaved servers should not send them sig { params(blk: T.proc.void).void } private def request_only(&blk) end + # @api private + # + # `response_only` attributes are omitted from `.#dump` when making requests sig { params(blk: T.proc.void).void } private def response_only(&blk) end @@ -437,6 +545,7 @@ module OpenAI end class << self + # @api private sig do override .params(value: T.any(OpenAI::BaseModel, T::Hash[T.anything, T.anything], T.anything)) @@ -445,6 +554,7 @@ module OpenAI def coerce(value) end + # @api private sig do override .params(value: T.any(T.attached_class, T.anything)) @@ -453,6 +563,7 @@ module OpenAI def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -462,10 +573,24 @@ module OpenAI end end + # Returns the raw value associated with the given key, if found. Otherwise, nil is + # returned. + # + # It is valid to lookup keys that are not in the API spec, for example to access + # undocumented features. This method does not parse response data into + # higher-level types. Lookup by anything other than a Symbol is an ArgumentError. sig { params(key: Symbol).returns(T.nilable(T.anything)) } def [](key) end + # Returns a Hash of the data underlying this object. O(1) + # + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object. i.e. there will be a + # key in this hash if they ever were, even if the set value was nil. + # + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. sig { overridable.returns(T::Hash[Symbol, T.anything]) } def to_h end @@ -476,6 +601,7 @@ module OpenAI def deconstruct_keys(keys) end + # Create a new instance of a model. sig { params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns(T.attached_class) } def self.new(data = {}) end diff --git a/rbi/lib/openai/base_page.rbi b/rbi/lib/openai/base_page.rbi index b4a17615..c5dc2a2d 100644 --- a/rbi/lib/openai/base_page.rbi +++ b/rbi/lib/openai/base_page.rbi @@ -2,8 +2,6 @@ module OpenAI module BasePage - abstract! 
- Elem = type_member(:out) sig { overridable.returns(T::Boolean) } @@ -24,6 +22,7 @@ module OpenAI alias_method :enum_for, :to_enum + # @api private sig do params( client: OpenAI::BaseClient, diff --git a/rbi/lib/openai/base_stream.rbi b/rbi/lib/openai/base_stream.rbi index 8b829bd1..c5f6c58e 100644 --- a/rbi/lib/openai/base_stream.rbi +++ b/rbi/lib/openai/base_stream.rbi @@ -9,6 +9,7 @@ module OpenAI def close end + # @api private sig { overridable.returns(T::Enumerable[Elem]) } private def iterator end @@ -23,6 +24,7 @@ module OpenAI alias_method :enum_for, :to_enum + # @api private sig do params( model: T.any(T::Class[T.anything], OpenAI::Converter), diff --git a/rbi/lib/openai/client.rbi b/rbi/lib/openai/client.rbi index ba6253be..2d8c31bf 100644 --- a/rbi/lib/openai/client.rbi +++ b/rbi/lib/openai/client.rbi @@ -78,10 +78,12 @@ module OpenAI def responses end + # @api private sig { override.returns(T::Hash[String, String]) } private def auth_headers end + # Creates and returns a new client for interacting with the API. sig do params( base_url: T.nilable(String), @@ -96,10 +98,15 @@ module OpenAI .returns(T.attached_class) end def self.new( + # Override the default base URL for the API, e.g., `"https://api.example.com/v2/"` base_url: nil, + # Defaults to `ENV["OPENAI_API_KEY"]` api_key: ENV["OPENAI_API_KEY"], + # Defaults to `ENV["OPENAI_ORG_ID"]` organization: ENV["OPENAI_ORG_ID"], + # Defaults to `ENV["OPENAI_PROJECT_ID"]` project: ENV["OPENAI_PROJECT_ID"], + # Max number of retries to attempt after a failed retryable request. max_retries: DEFAULT_MAX_RETRIES, timeout: DEFAULT_TIMEOUT_IN_SECONDS, initial_retry_delay: DEFAULT_INITIAL_RETRY_DELAY, diff --git a/rbi/lib/openai/cursor_page.rbi b/rbi/lib/openai/cursor_page.rbi index ffbe0d60..c40bab43 100644 --- a/rbi/lib/openai/cursor_page.rbi +++ b/rbi/lib/openai/cursor_page.rbi @@ -6,11 +6,11 @@ module OpenAI Elem = type_member - sig { returns(T::Array[Elem]) } + sig { returns(T.nilable(T::Array[Elem])) } def data end - sig { params(_: T::Array[Elem]).returns(T::Array[Elem]) } + sig { params(_: T.nilable(T::Array[Elem])).returns(T.nilable(T::Array[Elem])) } def data=(_) end @@ -21,17 +21,5 @@ module OpenAI sig { params(_: T::Boolean).returns(T::Boolean) } def has_more=(_) end - - sig do - params( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::RequestComponentsShape, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - page_data: T::Hash[Symbol, T.anything] - ) - .returns(T.attached_class) - end - def self.new(client:, req:, headers:, page_data:) - end end end diff --git a/rbi/lib/openai/errors.rbi b/rbi/lib/openai/errors.rbi index 69c98916..56d79f0b 100644 --- a/rbi/lib/openai/errors.rbi +++ b/rbi/lib/openai/errors.rbi @@ -5,6 +5,10 @@ module OpenAI sig { returns(T.nilable(StandardError)) } def cause end + + sig { params(_: T.nilable(StandardError)).returns(T.nilable(StandardError)) } + def cause=(_) + end end class ConversionError < OpenAI::Error @@ -15,26 +19,51 @@ module OpenAI def url end + sig { params(_: URI::Generic).returns(URI::Generic) } + def url=(_) + end + sig { returns(T.nilable(Integer)) } def status end + sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } + def status=(_) + end + sig { returns(T.nilable(T.anything)) } def body end + sig { params(_: T.nilable(T.anything)).returns(T.nilable(T.anything)) } + def body=(_) + end + sig { returns(T.nilable(String)) } def code end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def code=(_) + end + sig { 
returns(T.nilable(String)) } def param end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def param=(_) + end + sig { returns(T.nilable(String)) } def type end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def type=(_) + end + + # @api private sig do params( url: URI::Generic, @@ -55,22 +84,43 @@ module OpenAI def status end + sig { params(_: NilClass).void } + def status=(_) + end + sig { void } def body end + sig { params(_: NilClass).void } + def body=(_) + end + sig { void } def code end + sig { params(_: NilClass).void } + def code=(_) + end + sig { void } def param end + sig { params(_: NilClass).void } + def param=(_) + end + sig { void } def type end + sig { params(_: NilClass).void } + def type=(_) + end + + # @api private sig do params( url: URI::Generic, @@ -87,6 +137,7 @@ module OpenAI end class APITimeoutError < OpenAI::APIConnectionError + # @api private sig do params( url: URI::Generic, @@ -103,6 +154,7 @@ module OpenAI end class APIStatusError < OpenAI::APIError + # @api private sig do params( url: URI::Generic, @@ -121,18 +173,35 @@ module OpenAI def status end + sig { params(_: Integer).returns(Integer) } + def status=(_) + end + sig { returns(T.nilable(String)) } def code end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def code=(_) + end + sig { returns(T.nilable(String)) } def param end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def param=(_) + end + sig { returns(T.nilable(String)) } def type end + sig { params(_: T.nilable(String)).returns(T.nilable(String)) } + def type=(_) + end + + # @api private sig do params( url: URI::Generic, diff --git a/rbi/lib/openai/extern.rbi b/rbi/lib/openai/extern.rbi index ca7768e3..e5e18a8d 100644 --- a/rbi/lib/openai/extern.rbi +++ b/rbi/lib/openai/extern.rbi @@ -1,7 +1,7 @@ # typed: strong module OpenAI + # @api private module Extern - abstract! end end diff --git a/rbi/lib/openai/models/audio/speech_create_params.rbi b/rbi/lib/openai/models/audio/speech_create_params.rbi index db0763fb..043a7179 100644 --- a/rbi/lib/openai/models/audio/speech_create_params.rbi +++ b/rbi/lib/openai/models/audio/speech_create_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The text to generate audio for. The maximum length is 4096 characters. sig { returns(String) } def input end @@ -15,6 +16,8 @@ module OpenAI def input=(_) end + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +26,10 @@ module OpenAI def model=(_) end + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). sig { returns(Symbol) } def voice end @@ -31,6 +38,8 @@ module OpenAI def voice=(_) end + # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -39,6 +48,8 @@ module OpenAI def response_format=(_) end + # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + # the default. 
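Editor's note: taken together, the speech parameters documented above can be exercised as follows. This is a hedged usage sketch assuming a configured client and the conventional `audio.speech.create` resource method; exact method names may differ from this diff's scope.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Values mirror the documented constraints: input is at most 4096 characters,
# voice and response_format come from the enums below, speed is 0.25..4.0.
client.audio.speech.create(
  input: "The quick brown fox jumped over the lazy dog.",
  model: :"tts-1",
  voice: :alloy,
  response_format: :mp3,
  speed: 1.0
)
```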
sig { returns(T.nilable(Float)) } def speed end @@ -77,19 +88,23 @@ module OpenAI def to_hash end + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). class Voice < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ALLOY = :alloy ASH = :ash CORAL = :coral @@ -99,29 +114,21 @@ module OpenAI NOVA = :nova SAGE = :sage SHIMMER = :shimmer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. class ResponseFormat < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MP3 = :mp3 OPUS = :opus AAC = :aac FLAC = :flac WAV = :wav PCM = :pcm - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/audio/speech_model.rbi b/rbi/lib/openai/models/audio/speech_model.rbi index 5caef0af..f465baf8 100644 --- a/rbi/lib/openai/models/audio/speech_model.rbi +++ b/rbi/lib/openai/models/audio/speech_model.rbi @@ -6,14 +6,10 @@ module OpenAI class SpeechModel < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TTS_1 = :"tts-1" TTS_1_HD = :"tts-1-hd" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/audio/transcription.rbi b/rbi/lib/openai/models/audio/transcription.rbi index c314036c..bc8940ae 100644 --- a/rbi/lib/openai/models/audio/transcription.rbi +++ b/rbi/lib/openai/models/audio/transcription.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class Transcription < OpenAI::BaseModel + # The transcribed text. sig { returns(String) } def text end @@ -12,6 +13,8 @@ module OpenAI def text=(_) end + # Represents a transcription response returned by model, based on the provided + # input. sig { params(text: String).returns(T.attached_class) } def self.new(text:) end diff --git a/rbi/lib/openai/models/audio/transcription_create_params.rbi b/rbi/lib/openai/models/audio/transcription_create_params.rbi index 324c2060..29ecd8fb 100644 --- a/rbi/lib/openai/models/audio/transcription_create_params.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } def file end @@ -15,6 +17,8 @@ module OpenAI def file=(_) end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +27,9 @@ module OpenAI def model=(_) end + # The language of the input audio. 
Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. sig { returns(T.nilable(String)) } def language end @@ -31,6 +38,10 @@ module OpenAI def language=(_) end + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. sig { returns(T.nilable(String)) } def prompt end @@ -39,6 +50,8 @@ module OpenAI def prompt=(_) end + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -47,6 +60,11 @@ module OpenAI def response_format=(_) end + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } def temperature end @@ -55,6 +73,11 @@ module OpenAI def temperature=(_) end + # The timestamp granularities to populate for this transcription. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. sig { returns(T.nilable(T::Array[Symbol])) } def timestamp_granularities end @@ -106,27 +129,21 @@ module OpenAI def to_hash end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end class TimestampGranularity < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + WORD = :word SEGMENT = :segment - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/audio/transcription_create_response.rbi b/rbi/lib/openai/models/audio/transcription_create_response.rbi index ba16b2e7..c414dc3d 100644 --- a/rbi/lib/openai/models/audio/transcription_create_response.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_response.rbi @@ -3,19 +3,12 @@ module OpenAI module Models module Audio + # Represents a transcription response returned by model, based on the provided + # input. class TranscriptionCreateResponse < OpenAI::Union abstract! 
- class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Audio::Transcription], [NilClass, OpenAI::Models::Audio::TranscriptionVerbose]] - ) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)} } end end end diff --git a/rbi/lib/openai/models/audio/transcription_segment.rbi b/rbi/lib/openai/models/audio/transcription_segment.rbi index 2e1f1c65..92a534a2 100644 --- a/rbi/lib/openai/models/audio/transcription_segment.rbi +++ b/rbi/lib/openai/models/audio/transcription_segment.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionSegment < OpenAI::BaseModel + # Unique identifier of the segment. sig { returns(Integer) } def id end @@ -12,6 +13,8 @@ module OpenAI def id=(_) end + # Average logprob of the segment. If the value is lower than -1, consider the + # logprobs failed. sig { returns(Float) } def avg_logprob end @@ -20,6 +23,8 @@ module OpenAI def avg_logprob=(_) end + # Compression ratio of the segment. If the value is greater than 2.4, consider the + # compression failed. sig { returns(Float) } def compression_ratio end @@ -28,6 +33,7 @@ module OpenAI def compression_ratio=(_) end + # End time of the segment in seconds. sig { returns(Float) } def end_ end @@ -36,6 +42,8 @@ module OpenAI def end_=(_) end + # Probability of no speech in the segment. If the value is higher than 1.0 and the + # `avg_logprob` is below -1, consider this segment silent. sig { returns(Float) } def no_speech_prob end @@ -44,6 +52,7 @@ module OpenAI def no_speech_prob=(_) end + # Seek offset of the segment. sig { returns(Integer) } def seek end @@ -52,6 +61,7 @@ module OpenAI def seek=(_) end + # Start time of the segment in seconds. sig { returns(Float) } def start end @@ -60,6 +70,7 @@ module OpenAI def start=(_) end + # Temperature parameter used for generating the segment. sig { returns(Float) } def temperature end @@ -68,6 +79,7 @@ module OpenAI def temperature=(_) end + # Text content of the segment. sig { returns(String) } def text end @@ -76,6 +88,7 @@ module OpenAI def text=(_) end + # Array of token IDs for the text content. sig { returns(T::Array[Integer]) } def tokens end diff --git a/rbi/lib/openai/models/audio/transcription_verbose.rbi b/rbi/lib/openai/models/audio/transcription_verbose.rbi index 7dbf05e8..55eb1b23 100644 --- a/rbi/lib/openai/models/audio/transcription_verbose.rbi +++ b/rbi/lib/openai/models/audio/transcription_verbose.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionVerbose < OpenAI::BaseModel + # The duration of the input audio. sig { returns(Float) } def duration end @@ -12,6 +13,7 @@ module OpenAI def duration=(_) end + # The language of the input audio. sig { returns(String) } def language end @@ -20,6 +22,7 @@ module OpenAI def language=(_) end + # The transcribed text. sig { returns(String) } def text end @@ -28,6 +31,7 @@ module OpenAI def text=(_) end + # Segments of the transcribed text and their corresponding details. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } def segments end @@ -39,6 +43,7 @@ module OpenAI def segments=(_) end + # Extracted words and their corresponding timestamps. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionWord])) } def words end @@ -50,6 +55,8 @@ module OpenAI def words=(_) end + # Represents a verbose json transcription response returned by model, based on the + # provided input. 
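Editor's note: since `TranscriptionCreateResponse` above is a union of `Transcription` and `TranscriptionVerbose`, callers can branch on the concrete variant with Ruby pattern matching. A sketch, where `response` is assumed to be the result of a transcription request:

```ruby
case response
in OpenAI::Models::Audio::TranscriptionVerbose
  # verbose_json responses additionally carry duration, segments, and words
  puts("#{response.duration}s: #{response.text}")
in OpenAI::Models::Audio::Transcription
  puts(response.text)
end
```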
sig do params( duration: Float, diff --git a/rbi/lib/openai/models/audio/transcription_word.rbi b/rbi/lib/openai/models/audio/transcription_word.rbi index 8a81058f..a3be4b46 100644 --- a/rbi/lib/openai/models/audio/transcription_word.rbi +++ b/rbi/lib/openai/models/audio/transcription_word.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionWord < OpenAI::BaseModel + # End time of the word in seconds. sig { returns(Float) } def end_ end @@ -12,6 +13,7 @@ module OpenAI def end_=(_) end + # Start time of the word in seconds. sig { returns(Float) } def start end @@ -20,6 +22,7 @@ module OpenAI def start=(_) end + # The text content of the word. sig { returns(String) } def word end diff --git a/rbi/lib/openai/models/audio/translation_create_params.rbi b/rbi/lib/openai/models/audio/translation_create_params.rbi index c8175a8d..fb5d4a71 100644 --- a/rbi/lib/openai/models/audio/translation_create_params.rbi +++ b/rbi/lib/openai/models/audio/translation_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The audio file object (not file name) translate, in one of these formats: flac, + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } def file end @@ -15,6 +17,8 @@ module OpenAI def file=(_) end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +27,10 @@ module OpenAI def model=(_) end + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. sig { returns(T.nilable(String)) } def prompt end @@ -31,6 +39,8 @@ module OpenAI def prompt=(_) end + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -39,6 +49,11 @@ module OpenAI def response_format=(_) end + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } def temperature end @@ -77,14 +92,12 @@ module OpenAI def to_hash end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end end end diff --git a/rbi/lib/openai/models/audio/translation_create_response.rbi b/rbi/lib/openai/models/audio/translation_create_response.rbi index 9a25186f..79ac8c66 100644 --- a/rbi/lib/openai/models/audio/translation_create_response.rbi +++ b/rbi/lib/openai/models/audio/translation_create_response.rbi @@ -6,16 +6,7 @@ module OpenAI class TranslationCreateResponse < OpenAI::Union abstract! 
- class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Audio::Translation], [NilClass, OpenAI::Models::Audio::TranslationVerbose]] - ) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)} } end end end diff --git a/rbi/lib/openai/models/audio/translation_verbose.rbi b/rbi/lib/openai/models/audio/translation_verbose.rbi index 566f2db2..bceb7944 100644 --- a/rbi/lib/openai/models/audio/translation_verbose.rbi +++ b/rbi/lib/openai/models/audio/translation_verbose.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranslationVerbose < OpenAI::BaseModel + # The duration of the input audio. sig { returns(Float) } def duration end @@ -12,6 +13,7 @@ module OpenAI def duration=(_) end + # The language of the output translation (always `english`). sig { returns(String) } def language end @@ -20,6 +22,7 @@ module OpenAI def language=(_) end + # The translated text. sig { returns(String) } def text end @@ -28,6 +31,7 @@ module OpenAI def text=(_) end + # Segments of the translated text and their corresponding details. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } def segments end diff --git a/rbi/lib/openai/models/audio_model.rbi b/rbi/lib/openai/models/audio_model.rbi index 7f4186f7..85348552 100644 --- a/rbi/lib/openai/models/audio_model.rbi +++ b/rbi/lib/openai/models/audio_model.rbi @@ -5,13 +5,9 @@ module OpenAI class AudioModel < OpenAI::Enum abstract! - WHISPER_1 = :"whisper-1" + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + WHISPER_1 = :"whisper-1" end end end diff --git a/rbi/lib/openai/models/audio_response_format.rbi b/rbi/lib/openai/models/audio_response_format.rbi index 087af985..fb54aad0 100644 --- a/rbi/lib/openai/models/audio_response_format.rbi +++ b/rbi/lib/openai/models/audio_response_format.rbi @@ -2,20 +2,18 @@ module OpenAI module Models + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. class AudioResponseFormat < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + JSON = :json TEXT = :text SRT = :srt VERBOSE_JSON = :verbose_json VTT = :vtt - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi index 5d106690..b1d97ad3 100644 --- a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class AutoFileChunkingStrategyParam < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -11,6 +12,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end diff --git a/rbi/lib/openai/models/batch.rbi b/rbi/lib/openai/models/batch.rbi index cec4b9fb..699d0782 100644 --- a/rbi/lib/openai/models/batch.rbi +++ b/rbi/lib/openai/models/batch.rbi @@ -11,6 +11,7 @@ module OpenAI def id=(_) end + # The time frame within which the batch should be processed. 
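Editor's note: the `Batch` fields documented in this hunk are mostly Unix timestamps in seconds; a small conversion sketch for display, assuming `batch` is a retrieved `OpenAI::Models::Batch`:

```ruby
created = Time.at(batch.created_at)                     # always present
expires = batch.expires_at && Time.at(batch.expires_at) # guard nilable fields
puts("batch #{batch.id} created #{created}, window #{batch.completion_window}")
```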
sig { returns(String) } def completion_window end @@ -19,6 +20,7 @@ module OpenAI def completion_window=(_) end + # The Unix timestamp (in seconds) for when the batch was created. sig { returns(Integer) } def created_at end @@ -27,6 +29,7 @@ module OpenAI def created_at=(_) end + # The OpenAI API endpoint used by the batch. sig { returns(String) } def endpoint end @@ -35,6 +38,7 @@ module OpenAI def endpoint=(_) end + # The ID of the input file for the batch. sig { returns(String) } def input_file_id end @@ -43,6 +47,7 @@ module OpenAI def input_file_id=(_) end + # The object type, which is always `batch`. sig { returns(Symbol) } def object end @@ -51,6 +56,7 @@ module OpenAI def object=(_) end + # The current status of the batch. sig { returns(Symbol) } def status end @@ -59,6 +65,7 @@ module OpenAI def status=(_) end + # The Unix timestamp (in seconds) for when the batch was cancelled. sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -67,6 +74,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the batch started cancelling. sig { returns(T.nilable(Integer)) } def cancelling_at end @@ -75,6 +83,7 @@ module OpenAI def cancelling_at=(_) end + # The Unix timestamp (in seconds) for when the batch was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -83,6 +92,7 @@ module OpenAI def completed_at=(_) end + # The ID of the file containing the outputs of requests with errors. sig { returns(T.nilable(String)) } def error_file_id end @@ -99,6 +109,7 @@ module OpenAI def errors=(_) end + # The Unix timestamp (in seconds) for when the batch expired. sig { returns(T.nilable(Integer)) } def expired_at end @@ -107,6 +118,7 @@ module OpenAI def expired_at=(_) end + # The Unix timestamp (in seconds) for when the batch will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -115,6 +127,7 @@ module OpenAI def expires_at=(_) end + # The Unix timestamp (in seconds) for when the batch failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -123,6 +136,7 @@ module OpenAI def failed_at=(_) end + # The Unix timestamp (in seconds) for when the batch started finalizing. sig { returns(T.nilable(Integer)) } def finalizing_at end @@ -131,6 +145,7 @@ module OpenAI def finalizing_at=(_) end + # The Unix timestamp (in seconds) for when the batch started processing. sig { returns(T.nilable(Integer)) } def in_progress_at end @@ -139,6 +154,12 @@ module OpenAI def in_progress_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -147,6 +168,7 @@ module OpenAI def metadata=(_) end + # The ID of the file containing the outputs of successfully executed requests. sig { returns(T.nilable(String)) } def output_file_id end @@ -155,6 +177,7 @@ module OpenAI def output_file_id=(_) end + # The request counts for different statuses within the batch. sig { returns(T.nilable(OpenAI::Models::BatchRequestCounts)) } def request_counts end @@ -242,9 +265,12 @@ module OpenAI def to_hash end + # The current status of the batch. class Status < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + VALIDATING = :validating FAILED = :failed IN_PROGRESS = :in_progress @@ -253,12 +279,6 @@ module OpenAI EXPIRED = :expired CANCELLING = :cancelling CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Errors < OpenAI::BaseModel @@ -270,6 +290,7 @@ module OpenAI def data=(_) end + # The object type, which is always `list`. sig { returns(T.nilable(String)) } def object end diff --git a/rbi/lib/openai/models/batch_create_params.rbi b/rbi/lib/openai/models/batch_create_params.rbi index ff5efeab..17c682c5 100644 --- a/rbi/lib/openai/models/batch_create_params.rbi +++ b/rbi/lib/openai/models/batch_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. sig { returns(Symbol) } def completion_window end @@ -14,6 +16,10 @@ module OpenAI def completion_window=(_) end + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. sig { returns(Symbol) } def endpoint end @@ -22,6 +28,15 @@ module OpenAI def endpoint=(_) end + # The ID of an uploaded file that contains requests for the new batch. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. sig { returns(String) } def input_file_id end @@ -30,6 +45,12 @@ module OpenAI def input_file_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -66,30 +87,28 @@ module OpenAI def to_hash end + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. class CompletionWindow < OpenAI::Enum abstract! - NUMBER_24H = :"24h" + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + NUMBER_24H = :"24h" end + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. class Endpoint < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + V1_CHAT_COMPLETIONS = :"/v1/chat/completions" V1_EMBEDDINGS = :"/v1/embeddings" V1_COMPLETIONS = :"/v1/completions" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/batch_error.rbi b/rbi/lib/openai/models/batch_error.rbi index b0742afb..ded1e3bb 100644 --- a/rbi/lib/openai/models/batch_error.rbi +++ b/rbi/lib/openai/models/batch_error.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class BatchError < OpenAI::BaseModel + # An error code identifying the error type. sig { returns(T.nilable(String)) } def code end @@ -11,6 +12,7 @@ module OpenAI def code=(_) end + # The line number of the input file where the error occurred, if applicable. sig { returns(T.nilable(Integer)) } def line end @@ -19,6 +21,7 @@ module OpenAI def line=(_) end + # A human-readable message providing more details about the error. sig { returns(T.nilable(String)) } def message end @@ -27,6 +30,7 @@ module OpenAI def message=(_) end + # The name of the parameter that caused the error, if applicable. sig { returns(T.nilable(String)) } def param end diff --git a/rbi/lib/openai/models/batch_list_params.rbi b/rbi/lib/openai/models/batch_list_params.rbi index 5d04cdfd..8a8368b8 100644 --- a/rbi/lib/openai/models/batch_list_params.rbi +++ b/rbi/lib/openai/models/batch_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,8 @@ module OpenAI def after=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/batch_request_counts.rbi b/rbi/lib/openai/models/batch_request_counts.rbi index 59b86c64..df746c81 100644 --- a/rbi/lib/openai/models/batch_request_counts.rbi +++ b/rbi/lib/openai/models/batch_request_counts.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class BatchRequestCounts < OpenAI::BaseModel + # Number of requests that have been completed successfully. sig { returns(Integer) } def completed end @@ -11,6 +12,7 @@ module OpenAI def completed=(_) end + # Number of requests that have failed. sig { returns(Integer) } def failed end @@ -19,6 +21,7 @@ module OpenAI def failed=(_) end + # Total number of requests in the batch. sig { returns(Integer) } def total end @@ -27,6 +30,7 @@ module OpenAI def total=(_) end + # The request counts for different statuses within the batch. sig { params(completed: Integer, failed: Integer, total: Integer).returns(T.attached_class) } def self.new(completed:, failed:, total:) end diff --git a/rbi/lib/openai/models/beta/assistant.rbi b/rbi/lib/openai/models/beta/assistant.rbi index c573af22..c6f6f83f 100644 --- a/rbi/lib/openai/models/beta/assistant.rbi +++ b/rbi/lib/openai/models/beta/assistant.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class Assistant < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. 
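Editor's note: the cursor parameters documented just above for `BatchListParams` (`after`, `limit`) drive list pagination. A hedged sketch of walking pages by hand; `client.batches.list` is assumed from the SDK's resource layout, and `data` is treated as nilable per the `CursorPage` change earlier in this diff:

```ruby
page = client.batches.list(limit: 20)

while page
  (page.data || []).each { puts(_1.id) }
  break unless page.has_more

  # `after` is the ID of the last object seen, per the doc string above.
  last_id = page.data&.last&.id
  page = last_id && client.batches.list(limit: 20, after: last_id)
end
```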
sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the assistant was created. sig { returns(Integer) } def created_at end @@ -20,6 +22,7 @@ module OpenAI def created_at=(_) end + # The description of the assistant. The maximum length is 512 characters. sig { returns(T.nilable(String)) } def description end @@ -28,6 +31,8 @@ module OpenAI def description=(_) end + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. sig { returns(T.nilable(String)) } def instructions end @@ -36,6 +41,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -44,6 +55,11 @@ module OpenAI def metadata=(_) end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(String) } def model end @@ -52,6 +68,7 @@ module OpenAI def model=(_) end + # The name of the assistant. The maximum length is 256 characters. sig { returns(T.nilable(String)) } def name end @@ -60,6 +77,7 @@ module OpenAI def name=(_) end + # The object type, which is always `assistant`. sig { returns(Symbol) } def object end @@ -68,6 +86,9 @@ module OpenAI def object=(_) end + # A list of tool enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ @@ -105,6 +126,26 @@ module OpenAI def tools=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -145,6 +186,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. 
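Editor's note: the `response_format` doc string above distinguishes Structured Outputs from plain JSON mode. The two shapes, written as Ruby hashes; the schema contents here are placeholders, not a real schema:

```ruby
# Structured Outputs: the model must match the supplied JSON schema.
structured = {
  type: "json_schema",
  json_schema: {name: "answer", schema: {type: "object", properties: {}}}
}

# JSON mode: valid JSON is guaranteed, but per the warning above you must
# also instruct the model to produce JSON via a system or user message.
json_mode = {type: "json_object"}
```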
        sig { returns(T.nilable(Float)) }
        def temperature
        end
@@ -153,6 +197,10 @@ module OpenAI
        def temperature=(_)
        end

+        # A set of resources that are used by the assistant's tools. The resources are
+        # specific to the type of tool. For example, the `code_interpreter` tool requires
+        # a list of file IDs, while the `file_search` tool requires a list of vector store
+        # IDs.
        sig { returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) }
        def tool_resources
        end
@@ -164,6 +212,11 @@ module OpenAI
        def tool_resources=(_)
        end

+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or temperature but not both.
        sig { returns(T.nilable(Float)) }
        def top_p
        end
@@ -172,6 +225,7 @@ module OpenAI
        def top_p=(_)
        end

+        # Represents an `assistant` that can call the model and use tools.
        sig do
          params(
            id: String,
@@ -279,6 +333,10 @@ module OpenAI
          def file_search=(_)
          end

+          # A set of resources that are used by the assistant's tools. The resources are
+          # specific to the type of tool. For example, the `code_interpreter` tool requires
+          # a list of file IDs, while the `file_search` tool requires a list of vector store
+          # IDs.
          sig do
            params(
              code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter,
@@ -302,6 +360,9 @@ module OpenAI
          end

          class CodeInterpreter < OpenAI::BaseModel
+            # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+            # available to the `code_interpreter` tool. There can be a maximum of 20 files
+            # associated with the tool.
            sig { returns(T.nilable(T::Array[String])) }
            def file_ids
            end
@@ -320,6 +381,10 @@ module OpenAI
          end

          class FileSearch < OpenAI::BaseModel
+            # The ID of the
+            # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            # attached to this assistant. There can be a maximum of 1 vector store attached to
+            # the assistant.
            sig { returns(T.nilable(T::Array[String])) }
            def vector_store_ids
            end
diff --git a/rbi/lib/openai/models/beta/assistant_create_params.rbi b/rbi/lib/openai/models/beta/assistant_create_params.rbi
index 9287af82..44da54e2 100644
--- a/rbi/lib/openai/models/beta/assistant_create_params.rbi
+++ b/rbi/lib/openai/models/beta/assistant_create_params.rbi
@@ -7,6 +7,11 @@ module OpenAI
        extend OpenAI::RequestParameters::Converter
        include OpenAI::RequestParameters

+        # ID of the model to use. You can use the
+        # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        # see all of your available models, or see our
+        # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        # them.
        sig { returns(T.any(String, Symbol)) }
        def model
        end
@@ -15,6 +20,7 @@ module OpenAI
        def model=(_)
        end

+        # The description of the assistant. The maximum length is 512 characters.
        sig { returns(T.nilable(String)) }
        def description
        end
@@ -23,6 +29,8 @@ module OpenAI
        def description=(_)
        end

+        # The system instructions that the assistant uses. The maximum length is 256,000
+        # characters.
        sig { returns(T.nilable(String)) }
        def instructions
        end
@@ -31,6 +39,12 @@ module OpenAI
        def instructions=(_)
        end

+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
        sig { returns(T.nilable(OpenAI::Models::Metadata)) }
        def metadata
        end
@@ -39,6 +53,7 @@ module OpenAI
        def metadata=(_)
        end

+        # The name of the assistant. The maximum length is 256 characters.
        sig { returns(T.nilable(String)) }
        def name
        end
@@ -47,6 +62,12 @@ module OpenAI
        def name=(_)
        end

+        # **o-series models only**
+        #
+        # Constrains effort on reasoning for
+        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+        # result in faster responses and fewer tokens used on reasoning in a response.
        sig { returns(T.nilable(Symbol)) }
        def reasoning_effort
        end
@@ -55,6 +76,26 @@ module OpenAI
        def reasoning_effort=(_)
        end

+        # Specifies the format that the model must output. Compatible with
+        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+        # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+        # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        #
+        # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        # Outputs which ensures the model will match your supplied JSON schema. Learn more
+        # in the
+        # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #
+        # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+        # message the model generates is valid JSON.
+        #
+        # **Important:** when using JSON mode, you **must** also instruct the model to
+        # produce JSON yourself via a system or user message. Without this, the model may
+        # generate an unending stream of whitespace until the generation reaches the token
+        # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+        # the message content may be partially cut off if `finish_reason="length"`, which
+        # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+        # max context length.
        sig do
          returns(
            T.nilable(
@@ -95,6 +136,9 @@ module OpenAI
        def response_format=(_)
        end

+        # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+        # make the output more random, while lower values like 0.2 will make it more
+        # focused and deterministic.
        sig { returns(T.nilable(Float)) }
        def temperature
        end
@@ -103,6 +147,10 @@ module OpenAI
        def temperature=(_)
        end

+        # A set of resources that are used by the assistant's tools. The resources are
+        # specific to the type of tool. For example, the `code_interpreter` tool requires
+        # a list of file IDs, while the `file_search` tool requires a list of vector store
+        # IDs.
        sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) }
        def tool_resources
        end
@@ -114,6 +162,9 @@ module OpenAI
        def tool_resources=(_)
        end

+        # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+        # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        # `function`.
        sig do
          returns(
            T.nilable(
@@ -153,6 +204,11 @@ module OpenAI
        def tools=(_)
        end

+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or temperature but not both.
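# Illustration (not part of the diff above): a minimal sketch of supplying the
# create parameters documented in this file through the SDK. It assumes a
# configured client; the model name and field values are placeholders, and the
# `beta.assistants.create` call mirrors the endpoint these params are generated for.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

assistant = client.beta.assistants.create(
  model: "gpt-4o",
  name: "Math Tutor",
  instructions: "You are a personal math tutor. Answer in JSON.",
  response_format: {type: :json_object}, # JSON mode; a prompt must also ask for JSON
  temperature: 0.2 # lower temperature => more focused, deterministic output
)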
sig { returns(T.nilable(Float)) } def top_p end @@ -242,14 +298,15 @@ module OpenAI def to_hash end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end class ToolResources < OpenAI::BaseModel @@ -275,6 +332,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, @@ -298,6 +359,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -316,6 +380,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -324,6 +392,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) @@ -362,6 +434,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. sig do returns( T.nilable( @@ -392,6 +466,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -400,6 +477,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -438,10 +521,22 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! 
+ Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + } + end + class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -450,6 +545,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -479,6 +576,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -510,6 +608,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -518,6 +619,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end @@ -540,17 +643,6 @@ module OpenAI end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/assistant_list_params.rbi b/rbi/lib/openai/models/beta/assistant_list_params.rbi index 8653e3c0..bb11fb1b 100644 --- a/rbi/lib/openai/models/beta/assistant_list_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_list_params.rbi @@ -7,6 +7,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -15,6 +19,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -23,6 +31,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -31,6 +41,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -67,17 +79,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! 
+          Value = type_template(:out) { {fixed: Symbol} }
+
          ASC = :asc
          DESC = :desc
-
-          class << self
-            sig { override.returns(T::Array[Symbol]) }
-            def values
-            end
-          end
        end
      end
    end
diff --git a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi
index 1e789450..ae4f724e 100644
--- a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi
+++ b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi
@@ -3,18 +3,38 @@
 module OpenAI
   module Models
     module Beta
+      # Specifies the format that the model must output. Compatible with
+      # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+      # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+      # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+      #
+      # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+      # Outputs which ensures the model will match your supplied JSON schema. Learn more
+      # in the
+      # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+      #
+      # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+      # message the model generates is valid JSON.
+      #
+      # **Important:** when using JSON mode, you **must** also instruct the model to
+      # produce JSON yourself via a system or user message. Without this, the model may
+      # generate an unending stream of whitespace until the generation reaches the token
+      # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+      # the message content may be partially cut off if `finish_reason="length"`, which
+      # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+      # max context length.
      class AssistantResponseFormatOption < OpenAI::Union
        abstract!

-        class << self
-          sig do
-            override
-              .returns(
-                [[NilClass, Symbol], [NilClass, OpenAI::Models::ResponseFormatText], [NilClass, OpenAI::Models::ResponseFormatJSONObject], [NilClass, OpenAI::Models::ResponseFormatJSONSchema]]
-              )
-          end
-          private def variants
-          end
+        Variants = type_template(:out) do
+          {
+            fixed: T.any(
+              Symbol,
+              OpenAI::Models::ResponseFormatText,
+              OpenAI::Models::ResponseFormatJSONObject,
+              OpenAI::Models::ResponseFormatJSONSchema
+            )
+          }
        end
      end
    end
diff --git a/rbi/lib/openai/models/beta/assistant_stream_event.rbi b/rbi/lib/openai/models/beta/assistant_stream_event.rbi
index 7ac9987b..e84d0007 100644
--- a/rbi/lib/openai/models/beta/assistant_stream_event.rbi
+++ b/rbi/lib/openai/models/beta/assistant_stream_event.rbi
@@ -3,10 +3,63 @@
 module OpenAI
   module Models
     module Beta
+      # Represents an event emitted when streaming a Run.
+      #
+      # Each event in a server-sent events stream has an `event` and `data` property:
+      #
+      # ```
+      # event: thread.created
+      # data: {"id": "thread_123", "object": "thread", ...}
+      # ```
+      #
+      # We emit events whenever a new object is created, transitions to a new state, or
+      # is being streamed in parts (deltas). For example, we emit `thread.run.created`
+      # when a new run is created, `thread.run.completed` when a run completes, and so
+      # on. When an Assistant chooses to create a message during a run, we emit a
+      # `thread.message.created` event, a `thread.message.in_progress` event, many
+      # `thread.message.delta` events, and finally a `thread.message.completed` event.
+      #
+      # We may add additional events over time, so we recommend handling unknown events
+      # gracefully in your code.
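# Illustration (not part of the diff above): branching on this union while
# tolerating unknown members, per the forward-compatibility note. The `stream`
# source is hypothetical in this sketch; only the event classes come from this file.
stream.each do |event| # e.g. events yielded by a streaming runs helper
  case event
  when OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta
    p event.data.delta # data is a MessageDeltaEvent; changed fields arrive incrementally
  when OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted
    puts "run #{event.data.id} completed"
  else
    # New event types may be added over time; ignore what we don't recognize.
  end
end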
See the + # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + # to learn how to integrate the Assistants API with streaming. class AssistantStreamEvent < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, + OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, + OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent + ) + } + end + class ThreadCreated < OpenAI::BaseModel + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig { returns(OpenAI::Models::Beta::Thread) } def data end @@ -23,6 +76,7 @@ module OpenAI def event=(_) end + # Whether to enable input audio transcription. sig { returns(T.nilable(T::Boolean)) } def enabled end @@ -31,6 +85,9 @@ module OpenAI def enabled=(_) end + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } def self.new(data:, enabled: nil, event: :"thread.created") end @@ -41,6 +98,8 @@ module OpenAI end class ThreadRunCreated < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -57,6 +116,8 @@ module OpenAI def event=(_) end + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.created") end @@ -67,6 +128,8 @@ module OpenAI end class ThreadRunQueued < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -83,6 +146,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.queued") end @@ -93,6 +158,8 @@ module OpenAI end class ThreadRunInProgress < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -109,6 +176,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.in_progress") end @@ -119,6 +188,8 @@ module OpenAI end class ThreadRunRequiresAction < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -135,6 +206,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.requires_action") end @@ -145,6 +218,8 @@ module OpenAI end class ThreadRunCompleted < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -161,6 +236,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.completed") end @@ -171,6 +248,8 @@ module OpenAI end class ThreadRunIncomplete < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -187,6 +266,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.incomplete") end @@ -197,6 +278,8 @@ module OpenAI end class ThreadRunFailed < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -213,6 +296,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.failed") end @@ -223,6 +308,8 @@ module OpenAI end class ThreadRunCancelling < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -239,6 +326,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelling") end @@ -249,6 +338,8 @@ module OpenAI end class ThreadRunCancelled < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -265,6 +356,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelled") end @@ -275,6 +368,8 @@ module OpenAI end class ThreadRunExpired < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -291,6 +386,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.expired") end @@ -301,6 +398,7 @@ module OpenAI end class ThreadRunStepCreated < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -320,6 +418,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.created") end @@ -330,6 +431,7 @@ module OpenAI end class ThreadRunStepInProgress < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -349,6 +451,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.in_progress") end @@ -359,6 +464,8 @@ module OpenAI end class ThreadRunStepDelta < OpenAI::BaseModel + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } def data end @@ -378,6 +485,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. sig do params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) .returns(T.attached_class) @@ -391,6 +501,7 @@ module OpenAI end class ThreadRunStepCompleted < OpenAI::BaseModel + # Represents a step in execution of a run. 
sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -410,6 +521,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.completed") end @@ -420,6 +534,7 @@ module OpenAI end class ThreadRunStepFailed < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -439,6 +554,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.failed") end @@ -449,6 +567,7 @@ module OpenAI end class ThreadRunStepCancelled < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -468,6 +587,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.cancelled") end @@ -478,6 +600,7 @@ module OpenAI end class ThreadRunStepExpired < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -497,6 +620,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.expired") end @@ -507,6 +633,8 @@ module OpenAI end class ThreadMessageCreated < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -523,6 +651,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.created") end @@ -533,6 +664,8 @@ module OpenAI end class ThreadMessageInProgress < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -549,6 +682,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.in_progress") end @@ -559,6 +695,8 @@ module OpenAI end class ThreadMessageDelta < OpenAI::BaseModel + # Represents a message delta i.e. any changed fields on a message during + # streaming. 
sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } def data end @@ -578,6 +716,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.delta") end @@ -588,6 +729,8 @@ module OpenAI end class ThreadMessageCompleted < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -604,6 +747,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.completed") end @@ -614,6 +760,8 @@ module OpenAI end class ThreadMessageIncomplete < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -630,6 +778,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.incomplete") end @@ -656,6 +807,9 @@ module OpenAI def event=(_) end + # Occurs when an + # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. + # This can happen due to an internal server error or a timeout. 
sig { params(data: OpenAI::Models::ErrorObject, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :error) end @@ -664,17 +818,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/assistant_tool.rbi b/rbi/lib/openai/models/beta/assistant_tool.rbi index 18612436..af9e9f94 100644 --- a/rbi/lib/openai/models/beta/assistant_tool.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool.rbi @@ -6,15 +6,14 @@ module OpenAI class AssistantTool < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::FileSearchTool], [Symbol, OpenAI::Models::Beta::FunctionTool]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::FileSearchTool, + OpenAI::Models::Beta::FunctionTool + ) + } end end end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi index 7d82ebcf..84562850 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class AssistantToolChoice < OpenAI::BaseModel + # The type of the tool. If type is `function`, the function name must be set sig { returns(Symbol) } def type end @@ -23,6 +24,8 @@ module OpenAI def function=(_) end + # Specifies a tool the model should use. Use to force the model to call a specific + # tool. 
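# Illustration (not part of the diff above): forcing a particular function tool
# with the class documented here. The `runs.create` call shape and the `thread_id`
# and `assistant` values are assumptions of this sketch; the `type:`/`function:`
# fields follow the sig below.
tool_choice = OpenAI::Models::Beta::AssistantToolChoice.new(
  type: :function,
  function: OpenAI::Models::Beta::AssistantToolChoiceFunction.new(name: "my_function")
)
run = client.beta.threads.runs.create(
  thread_id, # placeholder: an existing thread's ID
  assistant_id: assistant.id,
  tool_choice: tool_choice
)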
sig do params(type: Symbol, function: OpenAI::Models::Beta::AssistantToolChoiceFunction).returns(T.attached_class) end @@ -33,18 +36,15 @@ module OpenAI def to_hash end + # The type of the tool. If type is `function`, the function name must be set class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + FUNCTION = :function CODE_INTERPRETER = :code_interpreter FILE_SEARCH = :file_search - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi index be6c8d7f..6e594ce1 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class AssistantToolChoiceFunction < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi index 6ae486e1..552ca737 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi @@ -3,27 +3,30 @@ module OpenAI module Models module Beta + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. class AssistantToolChoiceOption < OpenAI::Union abstract! + Variants = type_template(:out) { {fixed: T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)} } + + # `none` means the model will not call any tools and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools before + # responding to the user. class Auto < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + NONE = :none AUTO = :auto REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Beta::AssistantToolChoice]]) } - private def variants - end end end end diff --git a/rbi/lib/openai/models/beta/assistant_update_params.rbi b/rbi/lib/openai/models/beta/assistant_update_params.rbi index 0c75a7fb..e05cff62 100644 --- a/rbi/lib/openai/models/beta/assistant_update_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_update_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The description of the assistant. The maximum length is 512 characters. sig { returns(T.nilable(String)) } def description end @@ -15,6 +16,8 @@ module OpenAI def description=(_) end + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. 
sig { returns(T.nilable(String)) } def instructions end @@ -23,6 +26,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -31,6 +40,11 @@ module OpenAI def metadata=(_) end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -39,6 +53,7 @@ module OpenAI def model=(_) end + # The name of the assistant. The maximum length is 256 characters. sig { returns(T.nilable(String)) } def name end @@ -47,6 +62,12 @@ module OpenAI def name=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -55,6 +76,26 @@ module OpenAI def reasoning_effort=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -95,6 +136,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -103,6 +147,10 @@ module OpenAI def temperature=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. 
        sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) }
        def tool_resources
        end
@@ -114,6 +162,9 @@ module OpenAI
        def tool_resources=(_)
        end

+        # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+        # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        # `function`.
        sig do
          returns(
            T.nilable(
@@ -153,6 +204,11 @@ module OpenAI
        def tools=(_)
        end

+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or temperature but not both.
        sig { returns(T.nilable(Float)) }
        def top_p
        end
@@ -242,55 +298,15 @@ module OpenAI
        def to_hash
        end

+        # ID of the model to use. You can use the
+        # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        # see all of your available models, or see our
+        # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        # them.
        class Model < OpenAI::Union
          abstract!

-          class AssistantSupportedModels < OpenAI::Enum
-            abstract!
-
-            O3_MINI = :"o3-mini"
-            O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
-            O1 = :o1
-            O1_2024_12_17 = :"o1-2024-12-17"
-            GPT_4O = :"gpt-4o"
-            GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20"
-            GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06"
-            GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13"
-            GPT_4O_MINI = :"gpt-4o-mini"
-            GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
-            GPT_4_5_PREVIEW = :"gpt-4.5-preview"
-            GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27"
-            GPT_4_TURBO = :"gpt-4-turbo"
-            GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09"
-            GPT_4_0125_PREVIEW = :"gpt-4-0125-preview"
-            GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview"
-            GPT_4_1106_PREVIEW = :"gpt-4-1106-preview"
-            GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview"
-            GPT_4 = :"gpt-4"
-            GPT_4_0314 = :"gpt-4-0314"
-            GPT_4_0613 = :"gpt-4-0613"
-            GPT_4_32K = :"gpt-4-32k"
-            GPT_4_32K_0314 = :"gpt-4-32k-0314"
-            GPT_4_32K_0613 = :"gpt-4-32k-0613"
-            GPT_3_5_TURBO = :"gpt-3.5-turbo"
-            GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k"
-            GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613"
-            GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106"
-            GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125"
-            GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613"
-
-            class << self
-              sig { override.returns(T::Array[Symbol]) }
-              def values
-              end
-            end
-          end
-
-          class << self
-            sig { override.returns([[NilClass, String], [NilClass, Symbol]]) }
-            private def variants
-            end
-          end
+          Variants = type_template(:out) { {fixed: T.any(String, Symbol)} }
        end

        class ToolResources < OpenAI::BaseModel
@@ -316,6 +332,10 @@ module OpenAI
          def file_search=(_)
          end

+          # A set of resources that are used by the assistant's tools. The resources are
+          # specific to the type of tool. For example, the `code_interpreter` tool requires
+          # a list of file IDs, while the `file_search` tool requires a list of vector store
+          # IDs.
          sig do
            params(
              code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
@@ -339,6 +359,10 @@ module OpenAI
          end

          class CodeInterpreter < OpenAI::BaseModel
+            # Overrides the list of
+            # [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+            # to the `code_interpreter` tool. There can be a maximum of 20 files associated
+            # with the tool.
sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -357,6 +381,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # Overrides the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi b/rbi/lib/openai/models/beta/code_interpreter_tool.rbi index a560387e..ba0e9924 100644 --- a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi +++ b/rbi/lib/openai/models/beta/code_interpreter_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class CodeInterpreterTool < OpenAI::BaseModel + # The type of tool being defined: `code_interpreter` sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/beta/file_search_tool.rbi b/rbi/lib/openai/models/beta/file_search_tool.rbi index a7d4bbaf..ce4b782a 100644 --- a/rbi/lib/openai/models/beta/file_search_tool.rbi +++ b/rbi/lib/openai/models/beta/file_search_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class FileSearchTool < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Overrides for the file search tool. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch)) } def file_search end @@ -35,6 +37,14 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The maximum number of results the file search tool should output. The default is + # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + # 1 and 50 inclusive. + # + # Note that the file search tool may output fewer than `max_num_results` results. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -43,6 +53,12 @@ module OpenAI def max_num_results=(_) end + # The ranking options for the file search. If not specified, the file search tool + # will use the `auto` ranker and a score_threshold of 0. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions)) } def ranking_options end @@ -54,6 +70,7 @@ module OpenAI def ranking_options=(_) end + # Overrides for the file search tool. sig do params( max_num_results: Integer, @@ -74,6 +91,8 @@ module OpenAI end class RankingOptions < OpenAI::BaseModel + # The score threshold for the file search. All values must be a floating point + # number between 0 and 1. sig { returns(Float) } def score_threshold end @@ -82,6 +101,8 @@ module OpenAI def score_threshold=(_) end + # The ranker to use for the file search. If not specified will use the `auto` + # ranker. sig { returns(T.nilable(Symbol)) } def ranker end @@ -90,6 +111,12 @@ module OpenAI def ranker=(_) end + # The ranking options for the file search. If not specified, the file search tool + # will use the `auto` ranker and a score_threshold of 0. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. 
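# Illustration (not part of the diff above): building a file search tool with the
# ranking options documented here. The values are placeholders, and the sketch
# assumes the generated constructors default `type:` the way other models in this
# diff do; the RankingOptions fields follow the sig below.
file_search_tool = OpenAI::Models::Beta::FileSearchTool.new(
  file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch.new(
    max_num_results: 10, # must be between 1 and 50 inclusive
    ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions.new(
      score_threshold: 0.5, # a floating point number between 0 and 1
      ranker: :auto
    )
  )
)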
sig { params(score_threshold: Float, ranker: Symbol).returns(T.attached_class) } def self.new(score_threshold:, ranker: nil) end @@ -98,17 +125,15 @@ module OpenAI def to_hash end + # The ranker to use for the file search. If not specified will use the `auto` + # ranker. class Ranker < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto DEFAULT_2024_08_21 = :default_2024_08_21 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/function_tool.rbi b/rbi/lib/openai/models/beta/function_tool.rbi index 0c7a758f..645a4c1c 100644 --- a/rbi/lib/openai/models/beta/function_tool.rbi +++ b/rbi/lib/openai/models/beta/function_tool.rbi @@ -12,6 +12,7 @@ module OpenAI def function=(_) end + # The type of tool being defined: `function` sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/beta/message_stream_event.rbi b/rbi/lib/openai/models/beta/message_stream_event.rbi index 135308ce..ef592ec4 100644 --- a/rbi/lib/openai/models/beta/message_stream_event.rbi +++ b/rbi/lib/openai/models/beta/message_stream_event.rbi @@ -3,10 +3,27 @@ module OpenAI module Models module Beta + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. class MessageStreamEvent < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, + OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, + OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, + OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, + OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete + ) + } + end + class ThreadMessageCreated < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -23,6 +40,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.created") end @@ -33,6 +53,8 @@ module OpenAI end class ThreadMessageInProgress < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -49,6 +71,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.in_progress") end @@ -59,6 +84,8 @@ module OpenAI end class ThreadMessageDelta < OpenAI::BaseModel + # Represents a message delta i.e. any changed fields on a message during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } def data end @@ -78,6 +105,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. 
sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.delta") end @@ -88,6 +118,8 @@ module OpenAI end class ThreadMessageCompleted < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -104,6 +136,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.completed") end @@ -114,6 +149,8 @@ module OpenAI end class ThreadMessageIncomplete < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -130,6 +167,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.incomplete") end @@ -138,17 +178,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/run_step_stream_event.rbi b/rbi/lib/openai/models/beta/run_step_stream_event.rbi index 1adb063a..40de2bea 100644 --- a/rbi/lib/openai/models/beta/run_step_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_step_stream_event.rbi @@ -3,10 +3,28 @@ module OpenAI module Models module Beta + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. class RunStepStreamEvent < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, + OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired + ) + } + end + class ThreadRunStepCreated < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -26,6 +44,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. 
sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.created") end @@ -36,6 +57,7 @@ module OpenAI end class ThreadRunStepInProgress < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -55,6 +77,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.in_progress") end @@ -65,6 +90,8 @@ module OpenAI end class ThreadRunStepDelta < OpenAI::BaseModel + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } def data end @@ -84,6 +111,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. sig do params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) .returns(T.attached_class) @@ -97,6 +127,7 @@ module OpenAI end class ThreadRunStepCompleted < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -116,6 +147,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.completed") end @@ -126,6 +160,7 @@ module OpenAI end class ThreadRunStepFailed < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -145,6 +180,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.failed") end @@ -155,6 +193,7 @@ module OpenAI end class ThreadRunStepCancelled < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -174,6 +213,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.cancelled") end @@ -184,6 +226,7 @@ module OpenAI end class ThreadRunStepExpired < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -203,6 +246,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. 
sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.expired") end @@ -211,17 +257,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/run_stream_event.rbi b/rbi/lib/openai/models/beta/run_stream_event.rbi index 47d3fd71..fb5ba148 100644 --- a/rbi/lib/openai/models/beta/run_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_stream_event.rbi @@ -3,10 +3,31 @@ module OpenAI module Models module Beta + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. class RunStreamEvent < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, + OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired + ) + } + end + class ThreadRunCreated < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -23,6 +44,8 @@ module OpenAI def event=(_) end + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.created") end @@ -33,6 +56,8 @@ module OpenAI end class ThreadRunQueued < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -49,6 +74,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.queued") end @@ -59,6 +86,8 @@ module OpenAI end class ThreadRunInProgress < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -75,6 +104,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. 
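Each event wrapper above pairs a `data` payload with an `event` symbol that defaults to the subclass's fixed value, so consumers can dispatch on the concrete class. A minimal Ruby sketch of that pattern, assuming `run` is an OpenAI::Models::Beta::Threads::Run obtained elsewhere:

require "openai"

# The `event:` keyword defaults to the fixed symbol for each subclass, so it
# can usually be omitted when constructing the wrapper.
event = OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated.new(data: run)
event.event # => :"thread.run.created"

# Dispatch on the wrapper class when consuming a stream of run events; the
# same pattern applies to the run-step events above.
case event
when OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated
  puts "run #{event.data.id} created"
when OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed
  puts "run #{event.data.id} failed"
end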
sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.in_progress") end @@ -85,6 +116,8 @@ module OpenAI end class ThreadRunRequiresAction < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -101,6 +134,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.requires_action") end @@ -111,6 +146,8 @@ module OpenAI end class ThreadRunCompleted < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -127,6 +164,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.completed") end @@ -137,6 +176,8 @@ module OpenAI end class ThreadRunIncomplete < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -153,6 +194,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.incomplete") end @@ -163,6 +206,8 @@ module OpenAI end class ThreadRunFailed < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -179,6 +224,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.failed") end @@ -189,6 +236,8 @@ module OpenAI end class ThreadRunCancelling < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -205,6 +254,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelling") end @@ -215,6 +266,8 @@ module OpenAI end class ThreadRunCancelled < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -231,6 +284,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. 
sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelled") end @@ -241,6 +296,8 @@ module OpenAI end class ThreadRunExpired < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -257,6 +314,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.expired") end @@ -265,17 +324,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/thread.rbi b/rbi/lib/openai/models/beta/thread.rbi index f7263e57..799f589a 100644 --- a/rbi/lib/openai/models/beta/thread.rbi +++ b/rbi/lib/openai/models/beta/thread.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class Thread < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the thread was created. sig { returns(Integer) } def created_at end @@ -20,6 +22,12 @@ module OpenAI def created_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -28,6 +36,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread`. sig { returns(Symbol) } def object end @@ -36,6 +45,10 @@ module OpenAI def object=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) } def tool_resources end @@ -47,6 +60,8 @@ module OpenAI def tool_resources=(_) end + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig do params( id: String, @@ -98,6 +113,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. 
For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, @@ -121,6 +140,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -139,6 +161,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi index d0dd117f..60679618 100644 --- a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } def assistant_id end @@ -15,6 +18,8 @@ module OpenAI def assistant_id=(_) end + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } def instructions end @@ -23,6 +28,11 @@ module OpenAI def instructions=(_) end + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -31,6 +41,11 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -39,6 +54,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -47,6 +68,10 @@ module OpenAI def metadata=(_) end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. 
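To make the ToolResources shape above concrete, a hedged request-payload sketch; the file and vector store IDs are placeholders:

# Per the doc comments above: at most 20 file IDs may back `code_interpreter`,
# and at most 1 vector store may back `file_search`.
tool_resources = {
  code_interpreter: {file_ids: %w[file-abc123 file-def456]},
  file_search: {vector_store_ids: %w[vs_abc123]}
}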
sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -55,6 +80,9 @@ module OpenAI def model=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -63,6 +91,26 @@ module OpenAI def parallel_tool_calls=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -103,6 +151,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -111,6 +162,8 @@ module OpenAI def temperature=(_) end + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread)) } def thread end @@ -122,6 +175,13 @@ module OpenAI def thread=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -133,6 +193,10 @@ module OpenAI def tool_choice=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) } def tool_resources end @@ -144,6 +208,8 @@ module OpenAI def tool_resources=(_) end + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. 
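The `response_format` notes above distinguish JSON mode from Structured Outputs; both payloads are sketched below, with an illustrative schema that is not part of this diff:

# JSON mode: remember to also instruct the model to emit JSON via a system
# or user message, per the warning above.
response_format = {type: "json_object"}

# Structured Outputs: the reply must conform to the supplied JSON schema.
response_format = {
  type: "json_schema",
  json_schema: {
    name: "weather_report",
    schema: {
      type: "object",
      properties: {city: {type: "string"}, temp_c: {type: "number"}},
      required: %w[city temp_c]
    }
  }
}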
sig do returns( T.nilable( @@ -187,6 +253,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -195,6 +266,8 @@ module OpenAI def top_p=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) } def truncation_strategy end @@ -303,17 +376,19 @@ module OpenAI def to_hash end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end class Thread < OpenAI::BaseModel + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message])) } def messages end @@ -325,6 +400,12 @@ module OpenAI def messages=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -333,6 +414,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) } def tool_resources end @@ -344,6 +429,8 @@ module OpenAI def tool_resources=(_) end + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. sig do params( messages: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], @@ -369,6 +456,7 @@ module OpenAI end class Message < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -415,6 +503,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -423,6 +517,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to.
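Tying the run-level parameters above together (`tool_choice`, `truncation_strategy`, `top_p`), a hedged sketch of a create-and-run call; the method name is inferred from the ThreadCreateAndRunParams class, and the assistant ID is a placeholder:

require "openai"

client = OpenAI::Client.new # assumed to read OPENAI_API_KEY from the environment

run = client.beta.threads.create_and_run(
  assistant_id: "asst_abc123",                                     # placeholder ID
  tool_choice: {type: "file_search"},                              # force one tool
  truncation_strategy: {type: "last_messages", last_messages: 10}, # keep 10 newest
  top_p: 0.9                                                       # per the advice above, leave temperature alone
)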
sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment])) } def attachments end @@ -436,6 +531,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -488,9 +589,25 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlockParam + ) + ] + ) + } + end + MessageContentPartParamArray = T.type_alias do T::Array[ T.any( @@ -500,45 +617,25 @@ module OpenAI ) ] end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -547,6 +644,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -618,7 +716,17 @@ module OpenAI class Tool < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + ) + } + end + class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -635,17 +743,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end end end end @@ -673,6 +770,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. 
sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, @@ -696,6 +797,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -714,6 +818,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -722,6 +830,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable( @@ -766,6 +878,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. sig do returns( T.nilable( @@ -796,6 +910,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -804,6 +921,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -842,10 +965,22 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + } + end + class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -854,6 +989,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -883,6 +1020,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -914,6 +1052,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -922,6 +1063,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. 
The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end @@ -946,17 +1089,6 @@ end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end end end end @@ -986,6 +1118,10 @@ def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, @@ -1009,6 +1145,9 @@ end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -1027,6 +1166,10 @@ end class FileSearch < OpenAI::BaseModel + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -1048,19 +1191,22 @@ class Tool < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Beta::CodeInterpreterTool], [NilClass, OpenAI::Models::Beta::FileSearchTool], [NilClass, OpenAI::Models::Beta::FunctionTool]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::FileSearchTool, + OpenAI::Models::Beta::FunctionTool + ) + } end end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -1069,6 +1215,8 @@ def type=(_) end + # The number of most recent messages from the thread when constructing the context + # for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -1077,6 +1225,8 @@ def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -1085,17 +1235,17 @@ def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract!
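The vector-store helper documented above can be exercised like the following sketch; the field name `vector_stores` is assumed (the sig is elided in this diff) and the file ID is a placeholder:

tool_resources = {
  file_search: {
    vector_stores: [
      # At most 1 store may be attached; the `auto` strategy uses
      # max_chunk_size_tokens: 800 and chunk_overlap_tokens: 400.
      {file_ids: %w[file-abc123], chunking_strategy: {type: "auto"}}
    ]
  }
}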
+ Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/thread_create_params.rbi b/rbi/lib/openai/models/beta/thread_create_params.rbi index 627808ef..94c632cf 100644 --- a/rbi/lib/openai/models/beta/thread_create_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message])) } def messages end @@ -18,6 +20,12 @@ module OpenAI def messages=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -26,6 +34,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) } def tool_resources end @@ -64,6 +76,7 @@ module OpenAI end class Message < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -110,6 +123,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -118,6 +137,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment])) } def attachments end @@ -129,6 +149,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -181,9 +207,25 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! 
+ Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlockParam + ) + ] + ) + } + end + MessageContentPartParamArray = T.type_alias do T::Array[ T.any( @@ -193,45 +235,25 @@ module OpenAI ) ] end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -240,6 +262,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -311,7 +334,17 @@ module OpenAI class Tool < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + ) + } + end + class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -328,17 +361,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end end end end @@ -366,6 +388,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, @@ -389,6 +415,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -407,6 +436,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. 
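The Content union above takes either a bare string or an array of typed blocks; a hedged example of both message shapes, with placeholder text and URL, the block hashes assumed to mirror TextContentBlockParam and ImageURLContentBlock:

messages = [
  # Shorthand: a plain string becomes the text content of the message.
  {role: :user, content: "Summarize the attached report."},
  # Expanded: mix text and image blocks in a single message.
  {
    role: :user,
    content: [
      {type: "text", text: "What does this diagram show?"},
      {type: "image_url", image_url: {url: "https://example.com/diagram.png"}}
    ]
  }
]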
sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -415,6 +448,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) @@ -453,6 +490,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. sig do returns( T.nilable( @@ -483,6 +522,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -491,6 +533,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -529,10 +577,22 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + } + end + class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -541,6 +601,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -570,6 +632,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -601,6 +664,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -609,6 +675,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
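A static strategy honoring the bounds documented above might look like the following sketch; the numbers are arbitrary but within the stated limits:

chunking_strategy = {
  type: "static",
  # max_chunk_size_tokens must be within [100, 4096], and
  # chunk_overlap_tokens must not exceed half of it.
  static: {max_chunk_size_tokens: 1024, chunk_overlap_tokens: 256}
}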
sig { returns(Integer) } def max_chunk_size_tokens end @@ -631,17 +699,6 @@ module OpenAI end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/thread_stream_event.rbi b/rbi/lib/openai/models/beta/thread_stream_event.rbi index df43a6d2..b7db3495 100644 --- a/rbi/lib/openai/models/beta/thread_stream_event.rbi +++ b/rbi/lib/openai/models/beta/thread_stream_event.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Beta class ThreadStreamEvent < OpenAI::BaseModel + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig { returns(OpenAI::Models::Beta::Thread) } def data end @@ -20,6 +22,7 @@ module OpenAI def event=(_) end + # Whether to enable input audio transcription. sig { returns(T.nilable(T::Boolean)) } def enabled end @@ -28,6 +31,9 @@ module OpenAI def enabled=(_) end + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } def self.new(data:, enabled: nil, event: :"thread.created") end diff --git a/rbi/lib/openai/models/beta/thread_update_params.rbi b/rbi/lib/openai/models/beta/thread_update_params.rbi index f95411f9..2748a8cc 100644 --- a/rbi/lib/openai/models/beta/thread_update_params.rbi +++ b/rbi/lib/openai/models/beta/thread_update_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -15,6 +21,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) } def tool_resources end @@ -73,6 +83,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, @@ -96,6 +110,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. 
sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -114,6 +131,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/threads/annotation.rbi b/rbi/lib/openai/models/beta/threads/annotation.rbi index 6510f8df..dc7434e2 100644 --- a/rbi/lib/openai/models/beta/threads/annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation.rbi @@ -4,18 +4,19 @@ module OpenAI module Models module Beta module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. class Annotation < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::FileCitationAnnotation], [Symbol, OpenAI::Models::Beta::Threads::FilePathAnnotation]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::FileCitationAnnotation, + OpenAI::Models::Beta::Threads::FilePathAnnotation + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi index 81699c4b..32598149 100644 --- a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi @@ -4,18 +4,19 @@ module OpenAI module Models module Beta module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. class AnnotationDelta < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation], [Symbol, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, + OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi index 631db0a5..75c0a985 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi @@ -32,6 +32,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(String) } def text end @@ -40,6 +41,7 @@ module OpenAI def text=(_) end + # Always `file_citation`. sig { returns(Symbol) } def type end @@ -48,6 +50,9 @@ module OpenAI def type=(_) end + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. sig do params( end_index: Integer, @@ -77,6 +82,7 @@ module OpenAI end class FileCitation < OpenAI::BaseModel + # The ID of the specific File the citation is from. 
sig { returns(String) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi index f2e75732..7031c979 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class FileCitationDeltaAnnotation < OpenAI::BaseModel + # The index of the annotation in the text content part. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `file_citation`. sig { returns(Symbol) } def type end @@ -48,6 +50,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(T.nilable(String)) } def text end @@ -56,6 +59,9 @@ module OpenAI def text=(_) end + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. sig do params( index: Integer, @@ -87,6 +93,7 @@ module OpenAI end class FileCitation < OpenAI::BaseModel + # The ID of the specific File the citation is from. sig { returns(T.nilable(String)) } def file_id end @@ -95,6 +102,7 @@ module OpenAI def file_id=(_) end + # The specific quote in the file. sig { returns(T.nilable(String)) } def quote end diff --git a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi index 0feca1b6..c5e902d9 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi @@ -32,6 +32,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(String) } def text end @@ -40,6 +41,7 @@ module OpenAI def text=(_) end + # Always `file_path`. sig { returns(Symbol) } def type end @@ -48,6 +50,8 @@ module OpenAI def type=(_) end + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. sig do params( end_index: Integer, @@ -77,6 +81,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file that was generated. sig { returns(String) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi index f12cbc1f..3da258f2 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class FilePathDeltaAnnotation < OpenAI::BaseModel + # The index of the annotation in the text content part. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `file_path`. sig { returns(Symbol) } def type end @@ -48,6 +50,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(T.nilable(String)) } def text end @@ -56,6 +59,8 @@ module OpenAI def text=(_) end + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. sig do params( index: Integer, @@ -87,6 +92,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file that was generated. 
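A hedged sketch of walking these annotations on a retrieved message; it assumes text blocks arrive as OpenAI::Models::Beta::Threads::TextContentBlock and expose their annotations via `#text.annotations`, which this diff does not show:

message.content.each do |block|
  next unless block.is_a?(OpenAI::Models::Beta::Threads::TextContentBlock)

  block.text.annotations.each do |annotation|
    case annotation
    when OpenAI::Models::Beta::Threads::FileCitationAnnotation
      # `text` is the placeholder in the message body to be replaced.
      puts "#{annotation.text} cites file #{annotation.file_citation.file_id}"
    when OpenAI::Models::Beta::Threads::FilePathAnnotation
      puts "#{annotation.text} links generated file #{annotation.file_path.file_id}"
    end
  end
end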
sig { returns(T.nilable(String)) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/image_file.rbi b/rbi/lib/openai/models/beta/threads/image_file.rbi index 5f75f9fc..879b0320 100644 --- a/rbi/lib/openai/models/beta/threads/image_file.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file.rbi @@ -5,6 +5,9 @@ module OpenAI module Beta module Threads class ImageFile < OpenAI::BaseModel + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(String) } def file_id end @@ -13,6 +16,8 @@ module OpenAI def file_id=(_) end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. sig { returns(T.nilable(Symbol)) } def detail end @@ -29,18 +34,16 @@ module OpenAI def to_hash end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. class Detail < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LOW = :low HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi index ebd1e9c5..281b7cb7 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def image_file=(_) end + # Always `image_file`. sig { returns(Symbol) } def type end @@ -21,6 +22,8 @@ module OpenAI def type=(_) end + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. sig { params(image_file: OpenAI::Models::Beta::Threads::ImageFile, type: Symbol).returns(T.attached_class) } def self.new(image_file:, type: :image_file) end diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi index 2f6a3435..89c10a01 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageFileDelta < OpenAI::BaseModel + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. sig { returns(T.nilable(Symbol)) } def detail end @@ -13,6 +15,9 @@ module OpenAI def detail=(_) end + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(T.nilable(String)) } def file_id end @@ -29,18 +34,16 @@ module OpenAI def to_hash end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. class Detail < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LOW = :low HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi index dd93fbb6..9dba68e1 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class ImageFileDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `image_file`. sig { returns(Symbol) } def type end @@ -32,6 +34,8 @@ module OpenAI def image_file=(_) end + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. sig do params(index: Integer, image_file: OpenAI::Models::Beta::Threads::ImageFileDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/image_url.rbi b/rbi/lib/openai/models/beta/threads/image_url.rbi index 512367f1..e4aa98eb 100644 --- a/rbi/lib/openai/models/beta/threads/image_url.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageURL < OpenAI::BaseModel + # The external URL of the image; must be one of the supported image types: jpeg, jpg, png, + # gif, webp. sig { returns(String) } def url end @@ -13,6 +15,8 @@ module OpenAI def url=(_) end + # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + # to high resolution using `high`. Default value is `auto`. sig { returns(T.nilable(Symbol)) } def detail end @@ -29,18 +33,16 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + # to high resolution using `high`. Default value is `auto`. class Detail < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LOW = :low HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi index 13a1daf7..a6c65020 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def image_url=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # References an image URL in the content of a message. sig { params(image_url: OpenAI::Models::Beta::Threads::ImageURL, type: Symbol).returns(T.attached_class) } def self.new(image_url:, type: :image_url) end diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi index b5ffce82..a300a59e 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageURLDelta < OpenAI::BaseModel + # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + # to high resolution using `high`.
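For instance, an `image_url` content block exercising the detail levels above (the URL is a placeholder):

image_block = {
  type: "image_url",
  # :low trades resolution for fewer tokens; :high opts into full detail.
  image_url: {url: "https://example.com/photo.png", detail: :low}
}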
sig { returns(T.nilable(Symbol)) } def detail end @@ -13,6 +15,8 @@ module OpenAI def detail=(_) end + # The URL of the image, must be one of the supported image types: jpeg, jpg, png, gif, + # webp. sig { returns(T.nilable(String)) } def url end @@ -29,18 +33,16 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. `low` uses fewer tokens; you can opt in + # to high resolution using `high`. class Detail < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LOW = :low HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi index 38635411..9f8e8803 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class ImageURLDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `image_url`. sig { returns(Symbol) } def type end @@ -32,6 +34,7 @@ module OpenAI def image_url=(_) end + # References an image URL in the content of a message. sig do params(index: Integer, image_url: OpenAI::Models::Beta::Threads::ImageURLDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/message.rbi b/rbi/lib/openai/models/beta/threads/message.rbi index 3f40f84e..b79d9945 100644 --- a/rbi/lib/openai/models/beta/threads/message.rbi +++ b/rbi/lib/openai/models/beta/threads/message.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class Message < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,9 @@ module OpenAI def id=(_) end + # If applicable, the ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. sig { returns(T.nilable(String)) } def assistant_id end @@ -21,6 +25,7 @@ module OpenAI def assistant_id=(_) end + # A list of files attached to the message, and the tools they were added to. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment])) } def attachments end @@ -32,6 +37,7 @@ module OpenAI def attachments=(_) end + # The Unix timestamp (in seconds) for when the message was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -40,6 +46,7 @@ module OpenAI def completed_at=(_) end + # The content of the message as an array of text and/or images. sig do returns( T::Array[ @@ -80,6 +87,7 @@ module OpenAI def content=(_) end + # The Unix timestamp (in seconds) for when the message was created. sig { returns(Integer) } def created_at end @@ -88,6 +96,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the message was marked as incomplete. sig { returns(T.nilable(Integer)) } def incomplete_at end @@ -96,6 +105,7 @@ module OpenAI def incomplete_at=(_) end + # On an incomplete message, details about why the message is incomplete. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails)) } def incomplete_details end @@ -107,6 +117,12 @@ module OpenAI def incomplete_details=(_) end + # Set of 16 key-value pairs that can be attached to an object.
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -115,6 +131,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread.message`. sig { returns(Symbol) } def object end @@ -123,6 +140,7 @@ module OpenAI def object=(_) end + # The entity that produced the message. One of `user` or `assistant`. sig { returns(Symbol) } def role end @@ -131,6 +149,9 @@ module OpenAI def role=(_) end + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. sig { returns(T.nilable(String)) } def run_id end @@ -139,6 +160,8 @@ module OpenAI def run_id=(_) end + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. sig { returns(Symbol) } def status end @@ -147,6 +170,8 @@ module OpenAI def status=(_) end + # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that + # this message belongs to. sig { returns(String) } def thread_id end @@ -155,6 +180,8 @@ module OpenAI def thread_id=(_) end + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -231,6 +258,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -239,6 +267,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -310,7 +339,17 @@ module OpenAI class Tool < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly + ) + } + end + class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -327,21 +366,11 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Beta::CodeInterpreterTool], [NilClass, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly]] - ) - end - private def variants - end - end end end class IncompleteDetails < OpenAI::BaseModel + # The reason the message is incomplete. sig { returns(Symbol) } def reason end @@ -350,6 +379,7 @@ module OpenAI def reason=(_) end + # On an incomplete message, details about why the message is incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason:) end @@ -358,48 +388,40 @@ module OpenAI def to_hash end + # The reason the message is incomplete. class Reason < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + CONTENT_FILTER = :content_filter MAX_TOKENS = :max_tokens RUN_CANCELLED = :run_cancelled RUN_EXPIRED = :run_expired RUN_FAILED = :run_failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end + # The entity that produced the message. One of `user` or `assistant`. class Role < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress INCOMPLETE = :incomplete COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/message_content.rbi b/rbi/lib/openai/models/beta/threads/message_content.rbi index b2972189..7bc0759c 100644 --- a/rbi/lib/openai/models/beta/threads/message_content.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content.rbi @@ -4,18 +4,20 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContent < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [Symbol, OpenAI::Models::Beta::Threads::TextContentBlock], [Symbol, OpenAI::Models::Beta::Threads::RefusalContentBlock]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlock, + OpenAI::Models::Beta::Threads::RefusalContentBlock + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi index fff1ee0b..09915e69 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi @@ -4,18 +4,20 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContentDelta < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::TextDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::RefusalDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, + OpenAI::Models::Beta::Threads::TextDeltaBlock, + OpenAI::Models::Beta::Threads::RefusalDeltaBlock, + OpenAI::Models::Beta::Threads::ImageURLDeltaBlock + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi index 82903174..fe9ae396 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi @@ -4,18 +4,19 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContentPartParam < OpenAI::Union abstract! 
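# Illustrative sketch (not part of this diff): with the variant classes now listed
# in `Variants`, consumer code can branch on the concrete block types with Ruby
# pattern matching. `TextContentBlock#text` and its `value` accessor are assumed
# from the API shape; they are not shown in this diff:
#
#   message.content.each do |part|
#     case part
#     in OpenAI::Models::Beta::Threads::ImageFileContentBlock
#       puts part.image_file.file_id
#     in OpenAI::Models::Beta::Threads::TextContentBlock
#       puts part.text.value # assumed accessor
#     in OpenAI::Models::Beta::Threads::RefusalContentBlock
#       warn part.refusal
#     else
#       # e.g. OpenAI::Models::Beta::Threads::ImageURLContentBlock
#     end
#   end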
- class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [Symbol, OpenAI::Models::Beta::Threads::TextContentBlockParam]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlockParam + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/message_create_params.rbi b/rbi/lib/openai/models/beta/threads/message_create_params.rbi index 4a1db9ed..0783f811 100644 --- a/rbi/lib/openai/models/beta/threads/message_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_create_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The text contents of the message. sig do returns( T.any( @@ -54,6 +55,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -62,6 +69,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment])) } def attachments end @@ -73,6 +81,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -127,9 +141,25 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlockParam + ) + ] + ) + } + end + MessageContentPartParamArray = T.type_alias do T::Array[ T.any( @@ -139,45 +169,25 @@ module OpenAI ) ] end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! 
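# Illustrative sketch (not part of this diff): the `Content` union above accepts
# either a plain String or an array of the content blocks named in `Variants`.
# The `OpenAI::Client` constructor and the `client.beta.threads.messages.create`
# call path are assumptions based on the SDK's resource layout:
#
#   client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
#   client.beta.threads.messages.create(
#     "thread_abc123", # hypothetical thread ID
#     role: :user,
#     content: "What is in this image?" # or an array of content blocks
#   )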
+ Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -186,6 +196,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -257,7 +268,17 @@ module OpenAI class Tool < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch + ) + } + end + class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -274,17 +295,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/message_delta.rbi b/rbi/lib/openai/models/beta/threads/message_delta.rbi index 4830ec92..7883727c 100644 --- a/rbi/lib/openai/models/beta/threads/message_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/message_delta.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class MessageDelta < OpenAI::BaseModel + # The content of the message as an array of text and/or images. sig do returns( T.nilable( @@ -47,6 +48,7 @@ module OpenAI def content=(_) end + # The entity that produced the message. One of `user` or `assistant`. sig { returns(T.nilable(Symbol)) } def role end @@ -55,6 +57,7 @@ module OpenAI def role=(_) end + # The delta containing the fields that have changed on the Message. sig do params( content: T::Array[ @@ -91,17 +94,14 @@ module OpenAI def to_hash end + # The entity that produced the message. One of `user` or `assistant`. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi index 7908606f..5df8b179 100644 --- a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class MessageDeltaEvent < OpenAI::BaseModel + # The identifier of the message, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,7 @@ module OpenAI def id=(_) end + # The delta containing the fields that have changed on the Message. sig { returns(OpenAI::Models::Beta::Threads::MessageDelta) } def delta end @@ -23,6 +25,7 @@ module OpenAI def delta=(_) end + # The object type, which is always `thread.message.delta`. sig { returns(Symbol) } def object end @@ -31,6 +34,8 @@ module OpenAI def object=(_) end + # Represents a message delta, i.e. any changed fields on a message during + # streaming.
sig do params(id: String, delta: OpenAI::Models::Beta::Threads::MessageDelta, object: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/message_list_params.rbi b/rbi/lib/openai/models/beta/threads/message_list_params.rbi index d2cb9550..4b93b0c8 100644 --- a/rbi/lib/openai/models/beta/threads/message_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_list_params.rbi @@ -8,6 +8,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -16,6 +20,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -24,6 +32,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -32,6 +42,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -40,6 +52,7 @@ module OpenAI def order=(_) end + # Filter messages by the run ID that generated them. sig { returns(T.nilable(String)) } def run_id end @@ -78,17 +91,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/message_update_params.rbi b/rbi/lib/openai/models/beta/threads/message_update_params.rbi index 4fe12d9d..f2d3e73a 100644 --- a/rbi/lib/openai/models/beta/threads/message_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_update_params.rbi @@ -16,6 +16,12 @@ module OpenAI def thread_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi index 2451c015..c2179549 100644 --- a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def refusal=(_) end + # Always `refusal`. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # The refusal content generated by the assistant. 
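# Illustrative sketch (not part of this diff): the cursor parameters documented
# above page through a thread's messages. The list call path and the page's
# `data` accessor are assumptions, not shown in this diff:
#
#   page = client.beta.threads.messages.list("thread_abc123", limit: 20, order: :desc)
#   last_id = page.data.last&.id
#   older = client.beta.threads.messages.list("thread_abc123", after: last_id)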
sig { params(refusal: String, type: Symbol).returns(T.attached_class) } def self.new(refusal:, type: :refusal) end diff --git a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi index c6f9732d..87dd9957 100644 --- a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class RefusalDeltaBlock < OpenAI::BaseModel + # The index of the refusal part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `refusal`. sig { returns(Symbol) } def type end @@ -29,6 +31,7 @@ module OpenAI def refusal=(_) end + # The refusal content that is part of a message. sig { params(index: Integer, refusal: String, type: Symbol).returns(T.attached_class) } def self.new(index:, refusal: nil, type: :refusal) end diff --git a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi index 2627ee3f..ef418354 100644 --- a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi @@ -5,6 +5,10 @@ module OpenAI module Beta module Threads class RequiredActionFunctionToolCall < OpenAI::BaseModel + # The ID of the tool call. This ID must be referenced when you submit the tool + # outputs using the + # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # endpoint. sig { returns(String) } def id end @@ -13,6 +17,7 @@ module OpenAI def id=(_) end + # The function definition. sig { returns(OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function) } def function end @@ -24,6 +29,8 @@ module OpenAI def function=(_) end + # The type of tool call the output is required for. For now, this is always + # `function`. sig { returns(Symbol) } def type end @@ -32,6 +39,7 @@ module OpenAI def type=(_) end + # Tool call objects sig do params( id: String, @@ -53,6 +61,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments that the model expects you to pass to the function. sig { returns(String) } def arguments end @@ -61,6 +70,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(String) } def name end @@ -69,6 +79,7 @@ module OpenAI def name=(_) end + # The function definition. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/beta/threads/run.rbi b/rbi/lib/openai/models/beta/threads/run.rbi index 61ddea55..94002e57 100644 --- a/rbi/lib/openai/models/beta/threads/run.rbi +++ b/rbi/lib/openai/models/beta/threads/run.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class Run < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,9 @@ module OpenAI def id=(_) end + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # execution of this run. sig { returns(String) } def assistant_id end @@ -21,6 +25,7 @@ module OpenAI def assistant_id=(_) end + # The Unix timestamp (in seconds) for when the run was cancelled.
sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -29,6 +34,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the run was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -37,6 +43,7 @@ module OpenAI def completed_at=(_) end + # The Unix timestamp (in seconds) for when the run was created. sig { returns(Integer) } def created_at end @@ -45,6 +52,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the run will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -53,6 +61,7 @@ module OpenAI def expires_at=(_) end + # The Unix timestamp (in seconds) for when the run failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -61,6 +70,8 @@ module OpenAI def failed_at=(_) end + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) } def incomplete_details end @@ -72,6 +83,9 @@ module OpenAI def incomplete_details=(_) end + # The instructions that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig { returns(String) } def instructions end @@ -80,6 +94,7 @@ module OpenAI def instructions=(_) end + # The last error associated with this run. Will be `null` if there are no errors. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::LastError)) } def last_error end @@ -91,6 +106,8 @@ module OpenAI def last_error=(_) end + # The maximum number of completion tokens specified to have been used over the + # course of the run. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -99,6 +116,8 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens specified to have been used over the course + # of the run. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -107,6 +126,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -115,6 +140,9 @@ module OpenAI def metadata=(_) end + # The model that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig { returns(String) } def model end @@ -123,6 +151,7 @@ module OpenAI def model=(_) end + # The object type, which is always `thread.run`. sig { returns(Symbol) } def object end @@ -131,6 +160,9 @@ module OpenAI def object=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T::Boolean) } def parallel_tool_calls end @@ -139,6 +171,8 @@ module OpenAI def parallel_tool_calls=(_) end + # Details on the action required to continue the run. Will be `null` if no action + # is required. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) } def required_action end @@ -150,6 +184,26 @@ module OpenAI def required_action=(_) end + # Specifies the format that the model must output. 
Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -190,6 +244,7 @@ module OpenAI def response_format=(_) end + # The Unix timestamp (in seconds) for when the run was started. sig { returns(T.nilable(Integer)) } def started_at end @@ -198,6 +253,9 @@ module OpenAI def started_at=(_) end + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. sig { returns(Symbol) } def status end @@ -206,6 +264,8 @@ module OpenAI def status=(_) end + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was executed on as a part of this run. sig { returns(String) } def thread_id end @@ -214,6 +274,13 @@ module OpenAI def thread_id=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -225,6 +292,9 @@ module OpenAI def tool_choice=(_) end + # The list of tools that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig do returns( T::Array[ @@ -262,6 +332,8 @@ module OpenAI def tools=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) } def truncation_strategy end @@ -273,6 +345,8 @@ module OpenAI def truncation_strategy=(_) end + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (e.g. `in_progress`, `queued`, etc.). sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) } def usage end @@ -284,6 +358,7 @@ module OpenAI def usage=(_) end + # The sampling temperature used for this run. If not set, defaults to 1.
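# Illustrative sketch (not part of this diff): opting a run into JSON mode per the
# `response_format` documentation above. Note that the prompt itself must also ask
# for JSON. The `runs.create` call path is an assumption:
#
#   client.beta.threads.runs.create(
#     "thread_abc123",
#     assistant_id: "asst_abc123",
#     instructions: "Answer with a JSON object.",
#     response_format: {type: :json_object}
#   )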
sig { returns(T.nilable(Float)) } def temperature end @@ -292,6 +367,7 @@ module OpenAI def temperature=(_) end + # The nucleus sampling value used for this run. If not set, defaults to 1. sig { returns(T.nilable(Float)) } def top_p end @@ -300,6 +376,8 @@ module OpenAI def top_p=(_) end + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -427,6 +505,8 @@ module OpenAI end class IncompleteDetails < OpenAI::BaseModel + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. sig { returns(T.nilable(Symbol)) } def reason end @@ -435,6 +515,8 @@ module OpenAI def reason=(_) end + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason: nil) end @@ -443,21 +525,20 @@ module OpenAI def to_hash end + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. class Reason < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MAX_COMPLETION_TOKENS = :max_completion_tokens MAX_PROMPT_TOKENS = :max_prompt_tokens - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class LastError < OpenAI::BaseModel + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. sig { returns(Symbol) } def code end @@ -466,6 +547,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -474,6 +556,7 @@ module OpenAI def message=(_) end + # The last error associated with this run. Will be `null` if there are no errors. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -482,22 +565,20 @@ module OpenAI def to_hash end + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. class Code < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + SERVER_ERROR = :server_error RATE_LIMIT_EXCEEDED = :rate_limit_exceeded INVALID_PROMPT = :invalid_prompt - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class RequiredAction < OpenAI::BaseModel + # Details on the tool outputs needed for this run to continue. sig { returns(OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs) } def submit_tool_outputs end @@ -509,6 +590,7 @@ module OpenAI def submit_tool_outputs=(_) end + # For now, this is always `submit_tool_outputs`. sig { returns(Symbol) } def type end @@ -517,6 +599,8 @@ module OpenAI def type=(_) end + # Details on the action required to continue the run. Will be `null` if no action + # is required. sig do params( submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, @@ -537,6 +621,7 @@ module OpenAI end class SubmitToolOutputs < OpenAI::BaseModel + # A list of the relevant tool calls. sig { returns(T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) } def tool_calls end @@ -548,6 +633,7 @@ module OpenAI def tool_calls=(_) end + # Details on the tool outputs needed for this run to continue. 
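# Illustrative sketch (not part of this diff): draining a `requires_action` run
# with the accessors defined above. `invoke` stands in for your own tool dispatch,
# and the `runs.submit_tool_outputs` call path is an assumption:
#
#   if run.status == :requires_action
#     outputs = run.required_action.submit_tool_outputs.tool_calls.map do |call|
#       result = invoke(call.function.name, call.function.arguments) # hypothetical dispatch
#       {tool_call_id: call.id, output: result.to_s}
#     end
#     client.beta.threads.runs.submit_tool_outputs(
#       run.id,
#       thread_id: run.thread_id,
#       tool_outputs: outputs
#     )
#   end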
sig do params(tool_calls: T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) .returns(T.attached_class) @@ -562,6 +648,10 @@ module OpenAI end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -570,6 +660,8 @@ module OpenAI def type=(_) end + # The number of most recent messages from the thread when constructing the context + # for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -578,6 +670,8 @@ module OpenAI def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -586,21 +680,22 @@ module OpenAI def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class Usage < OpenAI::BaseModel + # Number of completion tokens used over the course of the run. sig { returns(Integer) } def completion_tokens end @@ -609,6 +704,7 @@ module OpenAI def completion_tokens=(_) end + # Number of prompt tokens used over the course of the run. sig { returns(Integer) } def prompt_tokens end @@ -617,6 +713,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -625,6 +722,8 @@ module OpenAI def total_tokens=(_) end + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (e.g. `in_progress`, `queued`, etc.). sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/run_create_params.rbi b/rbi/lib/openai/models/beta/threads/run_create_params.rbi index 3c923be3..dc48611b 100644 --- a/rbi/lib/openai/models/beta/threads/run_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_create_params.rbi @@ -8,6 +8,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } def assistant_id end @@ -16,6 +19,13 @@ module OpenAI def assistant_id=(_) end + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information.
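# Illustrative sketch (not part of this diff): combining the `include` option
# documented above with a truncation strategy, so only the ten most recent thread
# messages are kept in context. The `runs.create` call path is an assumption:
#
#   client.beta.threads.runs.create(
#     "thread_abc123",
#     assistant_id: "asst_abc123",
#     include: [:"step_details.tool_calls[*].file_search.results[*].content"],
#     truncation_strategy: {type: :last_messages, last_messages: 10}
#   )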
sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -24,6 +34,9 @@ module OpenAI def include=(_) end + # Appends additional instructions at the end of the instructions for the run. This + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. sig { returns(T.nilable(String)) } def additional_instructions end @@ -32,6 +45,7 @@ module OpenAI def additional_instructions=(_) end + # Adds additional messages to the thread before creating the run. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage])) } def additional_messages end @@ -43,6 +57,9 @@ module OpenAI def additional_messages=(_) end + # Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } def instructions end @@ -51,6 +68,11 @@ module OpenAI def instructions=(_) end + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -59,6 +81,11 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -67,6 +94,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -75,6 +108,10 @@ module OpenAI def metadata=(_) end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -83,6 +120,9 @@ module OpenAI def model=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -91,6 +131,12 @@ module OpenAI def parallel_tool_calls=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. 
sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -99,6 +145,26 @@ module OpenAI def reasoning_effort=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -139,6 +205,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -147,6 +216,13 @@ module OpenAI def temperature=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -158,6 +234,8 @@ module OpenAI def tool_choice=(_) end + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. sig do returns( T.nilable( @@ -201,6 +279,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -209,6 +292,8 @@ module OpenAI def top_p=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) } def truncation_strategy end @@ -324,6 +409,7 @@ module OpenAI end class AdditionalMessage < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -370,6 +456,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message.
Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -378,6 +470,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]) @@ -397,6 +490,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -449,9 +548,25 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Beta::Threads::ImageFileContentBlock, + OpenAI::Models::Beta::Threads::ImageURLContentBlock, + OpenAI::Models::Beta::Threads::TextContentBlockParam + ) + ] + ) + } + end + MessageContentPartParamArray = T.type_alias do T::Array[ T.any( @@ -461,45 +576,25 @@ module OpenAI ) ] end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -508,6 +603,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -579,7 +675,17 @@ module OpenAI class Tool < OpenAI::Union abstract! 
+ Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::CodeInterpreterTool, + OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + ) + } + end + class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -596,32 +702,25 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end end end end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -630,6 +729,8 @@ module OpenAI def type=(_) end + # The number of most recent messages from the thread when constructing the context + # for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -638,6 +739,8 @@ module OpenAI def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -646,17 +749,17 @@ module OpenAI def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/run_list_params.rbi b/rbi/lib/openai/models/beta/threads/run_list_params.rbi index cffb671d..44ca10d2 100644 --- a/rbi/lib/openai/models/beta/threads/run_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_list_params.rbi @@ -8,6 +8,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -16,6 +20,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination.
`before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -24,6 +32,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -32,6 +42,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -68,17 +80,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/run_status.rbi b/rbi/lib/openai/models/beta/threads/run_status.rbi index e890d711..76ada3f1 100644 --- a/rbi/lib/openai/models/beta/threads/run_status.rbi +++ b/rbi/lib/openai/models/beta/threads/run_status.rbi @@ -4,9 +4,14 @@ module OpenAI module Models module Beta module Threads + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. class RunStatus < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + QUEUED = :queued IN_PROGRESS = :in_progress REQUIRES_ACTION = :requires_action @@ -16,12 +21,6 @@ module OpenAI COMPLETED = :completed INCOMPLETE = :incomplete EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi index 42c12d0a..0a39a132 100644 --- a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi @@ -16,6 +16,7 @@ module OpenAI def thread_id=(_) end + # A list of tools for which the outputs are being submitted. sig { returns(T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]) } def tool_outputs end @@ -52,6 +53,7 @@ module OpenAI end class ToolOutput < OpenAI::BaseModel + # The output of the tool call to be submitted to continue the run. sig { returns(T.nilable(String)) } def output end @@ -60,6 +62,8 @@ module OpenAI def output=(_) end + # The ID of the tool call in the `required_action` object within the run object + # the output is being submitted for. sig { returns(T.nilable(String)) } def tool_call_id end diff --git a/rbi/lib/openai/models/beta/threads/run_update_params.rbi b/rbi/lib/openai/models/beta/threads/run_update_params.rbi index 0ede0dab..d01aa52d 100644 --- a/rbi/lib/openai/models/beta/threads/run_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_update_params.rbi @@ -16,6 +16,12 @@ module OpenAI def thread_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi index 53f5040c..a6c26d19 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterLogs < OpenAI::BaseModel + # The index of the output in the outputs array. sig { returns(Integer) } def index end @@ -14,6 +15,7 @@ module OpenAI def index=(_) end + # Always `logs`. sig { returns(Symbol) } def type end @@ -22,6 +24,7 @@ module OpenAI def type=(_) end + # The text output from the Code Interpreter tool call. sig { returns(T.nilable(String)) } def logs end @@ -30,6 +33,7 @@ module OpenAI def logs=(_) end + # Text output from the Code Interpreter tool call as part of a run step. sig { params(index: Integer, logs: String, type: Symbol).returns(T.attached_class) } def self.new(index:, logs: nil, type: :logs) end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi index e3a5d1f7..d598eb87 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterOutputImage < OpenAI::BaseModel + # The index of the output in the outputs array. sig { returns(Integer) } def index end @@ -14,6 +15,7 @@ module OpenAI def index=(_) end + # Always `image`. sig { returns(Symbol) } def type end @@ -54,6 +56,8 @@ module OpenAI end class Image < OpenAI::BaseModel + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. sig { returns(T.nilable(String)) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi index fb36dea2..4ddb7b18 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterToolCall < OpenAI::BaseModel + # The ID of the tool call. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Code Interpreter tool call definition. sig { returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter) } def code_interpreter end @@ -25,6 +27,8 @@ module OpenAI def code_interpreter=(_) end + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. sig { returns(Symbol) } def type end @@ -33,6 +37,7 @@ module OpenAI def type=(_) end + # Details of the Code Interpreter tool call the run step was involved in. sig do params( id: String, @@ -58,6 +63,7 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # The input to the Code Interpreter tool call. 
sig { returns(String) } def input end @@ -66,6 +72,9 @@ module OpenAI def input=(_) end + # The outputs from the Code Interpreter tool call. Code Interpreter can output one + # or more items, including text (`logs`) or images (`image`). Each of these is + # represented by a different object type. sig do returns( T::Array[ T.any( OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image ) ] ) end def outputs end @@ -100,6 +109,7 @@ module OpenAI def outputs=(_) end + # The Code Interpreter tool call definition. sig do params( input: String, @@ -132,10 +142,21 @@ module OpenAI def to_hash end + # Text output from the Code Interpreter tool call as part of a run step. class Output < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image + ) + } + end + class Logs < OpenAI::BaseModel + # The text output from the Code Interpreter tool call. sig { returns(String) } def logs end @@ -144,6 +165,7 @@ module OpenAI def logs=(_) end + # Always `logs`. sig { returns(Symbol) } def type end @@ -152,6 +174,7 @@ module OpenAI def type=(_) end + # Text output from the Code Interpreter tool call as part of a run step. sig { params(logs: String, type: Symbol).returns(T.attached_class) } def self.new(logs:, type: :logs) end @@ -181,6 +204,7 @@ module OpenAI def image=(_) end + # Always `image`. sig { returns(Symbol) } def type end @@ -212,6 +236,8 @@ module OpenAI end class Image < OpenAI::BaseModel + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. sig { returns(String) } def file_id end @@ -229,17 +255,6 @@ module OpenAI end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs], [Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image]] ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi index 763d9012..76d2e5d9 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterToolCallDelta < OpenAI::BaseModel + # The index of the tool call in the tool calls array. sig { returns(Integer) } def index end @@ -14,6 +15,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # The ID of the tool call. sig { returns(T.nilable(String)) } def id end @@ -30,6 +34,7 @@ module OpenAI def id=(_) end + # The Code Interpreter tool call definition. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter)) } def code_interpreter end @@ -41,6 +46,7 @@ module OpenAI def code_interpreter=(_) end + # Details of the Code Interpreter tool call the run step was involved in. sig do params( index: Integer, @@ -68,6 +74,7 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # The input to the Code Interpreter tool call.
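The hunk above shows the recurring union refactor in this PR: the `private def variants` singleton goes away and the variant set moves into a `Variants` type_template constant. Consuming such a union is still a plain `case` dispatch on the concrete classes; a sketch, assuming `output` is one element of a parsed tool call's `outputs` array:

```ruby
# Branch on the concrete variant of the Output union (classes from this diff).
case output
when OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs
  puts "logs: #{output.logs}"
when OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image
  puts "image file: #{output.image.file_id}"
end
```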
sig { returns(T.nilable(String)) } def input end @@ -76,6 +83,9 @@ module OpenAI def input=(_) end + # The outputs from the Code Interpreter tool call. Code Interpreter can output one + # or more items, including text (`logs`) or images (`image`). Each of these is + # represented by a different object type. sig do returns( T.nilable( @@ -112,6 +122,7 @@ module OpenAI def outputs=(_) end + # The Code Interpreter tool call definition. sig do params( input: String, @@ -144,18 +155,17 @@ module OpenAI def to_hash end + # Text output from the Code Interpreter tool call as part of a run step. class Output < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs], [Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi index 9aaccc68..b4f818ee 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FileSearchToolCall < OpenAI::BaseModel + # The ID of the tool call object. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # For now, this is always going to be an empty object. sig { returns(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch) } def file_search end @@ -25,6 +27,8 @@ module OpenAI def file_search=(_) end + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -54,6 +58,7 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The ranking options for the file search. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions)) } def ranking_options end @@ -65,6 +70,7 @@ module OpenAI def ranking_options=(_) end + # The results of the file search. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result])) } def results end @@ -76,6 +82,7 @@ module OpenAI def results=(_) end + # For now, this is always going to be an empty object. sig do params( ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, @@ -99,6 +106,8 @@ module OpenAI end class RankingOptions < OpenAI::BaseModel + # The ranker to use for the file search. If not specified, the `auto` ranker + # will be used. sig { returns(Symbol) } def ranker end @@ -107,6 +116,8 @@ module OpenAI def ranker=(_) end + # The score threshold for the file search. All values must be a floating point + # number between 0 and 1. sig { returns(Float) } def score_threshold end @@ -115,6 +126,7 @@ module OpenAI def score_threshold=(_) end + # The ranking options for the file search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker:, score_threshold:) end @@ -123,21 +135,20 @@ module OpenAI def to_hash end + # The ranker to use for the file search. If not specified, the `auto` ranker + # will be used. class Ranker < OpenAI::Enum abstract!
+ Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto DEFAULT_2024_08_21 = :default_2024_08_21 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class Result < OpenAI::BaseModel + # The ID of the file that the result was found in. sig { returns(String) } def file_id end @@ -146,6 +157,7 @@ module OpenAI def file_id=(_) end + # The name of the file that the result was found in. sig { returns(String) } def file_name end @@ -154,6 +166,8 @@ module OpenAI def file_name=(_) end + # The score of the result. All values must be a floating point number between 0 + # and 1. sig { returns(Float) } def score end @@ -162,6 +176,8 @@ module OpenAI def score=(_) end + # The content of the result that was found. The content is only included if + # requested via the include query parameter. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) ) end def content end @@ -177,6 +193,7 @@ module OpenAI def content=(_) end + # A result instance of the file search. sig do params( file_id: String, @@ -204,6 +221,7 @@ module OpenAI end class Content < OpenAI::BaseModel + # The text content of the file. sig { returns(T.nilable(String)) } def text end @@ -212,6 +230,7 @@ module OpenAI def text=(_) end + # The type of the content. sig { returns(T.nilable(Symbol)) } def type end @@ -228,16 +247,13 @@ module OpenAI def to_hash end + # The type of the content. class Type < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi index 82d07489..4986a86f 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FileSearchToolCallDelta < OpenAI::BaseModel + # For now, this is always going to be an empty object. sig { returns(T.anything) } def file_search end @@ -14,6 +15,7 @@ module OpenAI def file_search=(_) end + # The index of the tool call in the tool calls array. sig { returns(Integer) } def index end @@ -22,6 +24,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -30,6 +34,7 @@ module OpenAI def type=(_) end + # The ID of the tool call object. sig { returns(T.nilable(String)) } def id end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi index 4f13f258..2fbc10a5 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FunctionToolCall < OpenAI::BaseModel + # The ID of the tool call object. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The definition of the function that was called. sig { returns(OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function) } def function end @@ -25,6 +27,8 @@ module OpenAI def function=(_) end + # The type of tool call. This is always going to be `function` for this type of + # tool call.
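The enum hunks here all follow the same recipe: delete the `class << self ... def values` boilerplate and declare `Value = type_template(:out) { {fixed: Symbol} }`, which pins the member type to `Symbol` at the type level. Condensed, the new shape of any of these enums reads as below (mirroring the `Ranker` enum from this diff; `OpenAI::Enum` and `type_template` come from the SDK and Sorbet respectively):

```ruby
# Sketch of the post-refactor enum shape.
class Ranker < OpenAI::Enum
  abstract!

  # Fixes the member type to Symbol; replaces the old `values` singleton.
  Value = type_template(:out) { {fixed: Symbol} }

  AUTO = :auto
  DEFAULT_2024_08_21 = :default_2024_08_21
end
```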
sig { returns(Symbol) } def type end @@ -54,6 +58,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments passed to the function. sig { returns(String) } def arguments end @@ -62,6 +67,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(String) } def name end @@ -70,6 +76,9 @@ module OpenAI def name=(_) end + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. sig { returns(T.nilable(String)) } def output end @@ -78,6 +87,7 @@ module OpenAI def output=(_) end + # The definition of the function that was called. sig do params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi index d5acf0b1..c026f468 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FunctionToolCallDelta < OpenAI::BaseModel + # The index of the tool call in the tool calls array. sig { returns(Integer) } def index end @@ -14,6 +15,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `function` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # The ID of the tool call object. sig { returns(T.nilable(String)) } def id end @@ -30,6 +34,7 @@ module OpenAI def id=(_) end + # The definition of the function that was called. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function)) } def function end @@ -68,6 +73,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments passed to the function. sig { returns(T.nilable(String)) } def arguments end @@ -76,6 +82,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(T.nilable(String)) } def name end @@ -84,6 +91,9 @@ module OpenAI def name=(_) end + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. sig { returns(T.nilable(String)) } def output end @@ -92,6 +102,7 @@ module OpenAI def output=(_) end + # The definition of the function that was called. sig do params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi index 73222dd0..ba7932c9 100644 --- a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi @@ -17,6 +17,7 @@ module OpenAI def message_creation=(_) end + # Always `message_creation`. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Details of the message creation by the run step. sig do params( message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, @@ -48,6 +50,7 @@ module OpenAI end class MessageCreation < OpenAI::BaseModel + # The ID of the message that was created by this run step. 
sig { returns(String) } def message_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi index 2f08a34f..c4e9125c 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStep < OpenAI::BaseModel + # The identifier of the run step, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -16,6 +17,9 @@ module OpenAI def id=(_) end + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. sig { returns(String) } def assistant_id end @@ -24,6 +28,7 @@ module OpenAI def assistant_id=(_) end + # The Unix timestamp (in seconds) for when the run step was cancelled. sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -32,6 +37,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the run step completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -40,6 +46,7 @@ module OpenAI def completed_at=(_) end + # The Unix timestamp (in seconds) for when the run step was created. sig { returns(Integer) } def created_at end @@ -48,6 +55,8 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the run step expired. A step is + # considered expired if the parent run is expired. sig { returns(T.nilable(Integer)) } def expired_at end @@ -56,6 +65,7 @@ module OpenAI def expired_at=(_) end + # The Unix timestamp (in seconds) for when the run step failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -64,6 +74,8 @@ module OpenAI def failed_at=(_) end + # The last error associated with this run step. Will be `null` if there are no + # errors. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) } def last_error end @@ -75,6 +87,12 @@ module OpenAI def last_error=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -83,6 +101,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread.run.step`. sig { returns(Symbol) } def object end @@ -91,6 +110,8 @@ module OpenAI def object=(_) end + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that + # this run step is a part of. sig { returns(String) } def run_id end @@ -99,6 +120,8 @@ module OpenAI def run_id=(_) end + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. sig { returns(Symbol) } def status end @@ -107,6 +130,7 @@ module OpenAI def status=(_) end + # The details of the run step. sig do returns( T.any( @@ -135,6 +159,8 @@ module OpenAI def step_details=(_) end + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. sig { returns(String) } def thread_id end @@ -143,6 +169,7 @@ module OpenAI def thread_id=(_) end + # The type of run step, which can be either `message_creation` or `tool_calls`. sig { returns(Symbol) } def type end @@ -151,6 +178,8 @@ module OpenAI def type=(_) end + # Usage statistics related to the run step. 
This value will be `null` while the + # run step's status is `in_progress`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) } def usage end @@ -162,6 +191,7 @@ module OpenAI def usage=(_) end + # Represents a step in execution of a run. sig do params( id: String, @@ -236,6 +266,7 @@ module OpenAI end class LastError < OpenAI::BaseModel + # One of `server_error` or `rate_limit_exceeded`. sig { returns(Symbol) } def code end @@ -244,6 +275,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -252,6 +284,8 @@ module OpenAI def message=(_) end + # The last error associated with this run step. Will be `null` if there are no + # errors. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -260,65 +294,57 @@ module OpenAI def to_hash end + # One of `server_error` or `rate_limit_exceeded`. class Code < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + SERVER_ERROR = :server_error RATE_LIMIT_EXCEEDED = :rate_limit_exceeded - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress CANCELLED = :cancelled FAILED = :failed COMPLETED = :completed EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The details of the run step. class StepDetails < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails], [Symbol, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, + OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails + ) + } end end + # The type of run step, which can be either `message_creation` or `tool_calls`. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MESSAGE_CREATION = :message_creation TOOL_CALLS = :tool_calls - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Usage < OpenAI::BaseModel + # Number of completion tokens used over the course of the run step. sig { returns(Integer) } def completion_tokens end @@ -327,6 +353,7 @@ module OpenAI def completion_tokens=(_) end + # Number of prompt tokens used over the course of the run step. sig { returns(Integer) } def prompt_tokens end @@ -335,6 +362,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -343,6 +371,8 @@ module OpenAI def total_tokens=(_) end + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. 
sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi index 60805244..cb3b9c62 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDelta < OpenAI::BaseModel + # The details of the run step. sig do returns( T.nilable( @@ -38,6 +39,7 @@ module OpenAI def step_details=(_) end + # The delta containing the fields that have changed on the run step. sig do params( step_details: T.any( @@ -64,18 +66,17 @@ module OpenAI def to_hash end + # The details of the run step. class StepDetails < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, + OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi index 73076f3a..7cc4a3fb 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDeltaEvent < OpenAI::BaseModel + # The identifier of the run step, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -16,6 +17,7 @@ module OpenAI def id=(_) end + # The delta containing the fields that have changed on the run step. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDelta) } def delta end @@ -27,6 +29,7 @@ module OpenAI def delta=(_) end + # The object type, which is always `thread.run.step.delta`. sig { returns(Symbol) } def object end @@ -35,6 +38,8 @@ module OpenAI def object=(_) end + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig do params(id: String, delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta, object: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi index e933c731..02216a5c 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDeltaMessageDelta < OpenAI::BaseModel + # Always `message_creation`. sig { returns(Symbol) } def type end @@ -27,6 +28,7 @@ module OpenAI def message_creation=(_) end + # Details of the message creation by the run step. sig do params( message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, @@ -50,6 +52,7 @@ module OpenAI end class MessageCreation < OpenAI::BaseModel + # The ID of the message that was created by this run step. 
sig { returns(T.nilable(String)) } def message_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi index b2726421..9fd4c51e 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi @@ -10,13 +10,9 @@ module OpenAI class RunStepInclude < OpenAI::Enum abstract! - STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content" + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content" end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi index 56b21c91..73059bac 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi @@ -17,6 +17,10 @@ module OpenAI def thread_id=(_) end + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -25,6 +29,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -33,6 +41,13 @@ module OpenAI def before=(_) end + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -41,6 +56,8 @@ module OpenAI def include=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -49,6 +66,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -89,17 +108,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi index 9778a156..71dc2e52 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi @@ -25,6 +25,13 @@ module OpenAI def run_id=(_) end + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi index 5d68bb14..e3f26a25 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi @@ -5,18 +5,18 @@ module OpenAI module Beta module Threads module Runs + # Details of the Code Interpreter tool call the run step was involved in. class ToolCall < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall], [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall], [Symbol, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, + OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, + OpenAI::Models::Beta::Threads::Runs::FunctionToolCall + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi index fc5420d6..8169ef81 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi @@ -5,18 +5,18 @@ module OpenAI module Beta module Threads module Runs + # Details of the Code Interpreter tool call the run step was involved in. class ToolCallDelta < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, + OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, + OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta + ) + } end end end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi index f9cb13ff..3258b9d6 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class ToolCallDeltaObject < OpenAI::BaseModel + # Always `tool_calls`. 
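The `StepListParams` comments above describe standard cursor pagination: page forward with `after`, backward with `before`, a `limit` between 1 and 100 (default 20), and `order` by `created_at`. A usage sketch; the client method path and return shape are assumptions here, not something this diff shows:

```ruby
# Hypothetical call shape for listing run steps (method path assumed).
steps = client.beta.threads.runs.steps.list(
  "run_abc123",
  thread_id: "thread_abc123",
  limit: 100,           # may range from 1 to 100; defaults to 20
  order: :desc,         # sort by the `created_at` timestamp
  after: "step_obj_foo" # cursor: resume after this object ID
)
```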
sig { returns(Symbol) } def type end @@ -14,6 +15,9 @@ module OpenAI def type=(_) end + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( @@ -53,6 +57,7 @@ module OpenAI def tool_calls=(_) end + # Details of the tool call. sig do params( tool_calls: T::Array[ diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi index 4d2d6cf2..3ad8cc7f 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi @@ -6,6 +6,9 @@ module OpenAI module Threads module Runs class ToolCallsStepDetails < OpenAI::BaseModel + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ @@ -43,6 +46,7 @@ module OpenAI def tool_calls=(_) end + # Always `tool_calls`. sig { returns(Symbol) } def type end @@ -51,6 +55,7 @@ module OpenAI def type=(_) end + # Details of the tool call. sig do params( tool_calls: T::Array[ diff --git a/rbi/lib/openai/models/beta/threads/text.rbi b/rbi/lib/openai/models/beta/threads/text.rbi index 97fe10b1..0ba8015c 100644 --- a/rbi/lib/openai/models/beta/threads/text.rbi +++ b/rbi/lib/openai/models/beta/threads/text.rbi @@ -39,6 +39,7 @@ module OpenAI def annotations=(_) end + # The data that makes up the text. sig { returns(String) } def value end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block.rbi b/rbi/lib/openai/models/beta/threads/text_content_block.rbi index d714859a..5a3f345f 100644 --- a/rbi/lib/openai/models/beta/threads/text_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/text_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # The text content that is part of a message. sig { params(text: OpenAI::Models::Beta::Threads::Text, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi b/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi index dcfd074b..58764a6f 100644 --- a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi +++ b/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class TextContentBlockParam < OpenAI::BaseModel + # Text content to be sent to the model sig { returns(String) } def text end @@ -13,6 +14,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -21,6 +23,7 @@ module OpenAI def type=(_) end + # The text content that is part of a message. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/beta/threads/text_delta.rbi b/rbi/lib/openai/models/beta/threads/text_delta.rbi index 3ffea679..12996683 100644 --- a/rbi/lib/openai/models/beta/threads/text_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/text_delta.rbi @@ -41,6 +41,7 @@ module OpenAI def annotations=(_) end + # The data that makes up the text. 
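Constructors in these models default their discriminator, as in `self.new(text:, type: :text)` above, so callers only pass the payload:

```ruby
block = OpenAI::Models::Beta::Threads::TextContentBlockParam.new(
  text: "Summarize the attached report."
)
block.type # => :text, supplied by the constructor default
```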
sig { returns(T.nilable(String)) } def value end diff --git a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi b/rbi/lib/openai/models/beta/threads/text_delta_block.rbi index a3e06fc4..80145b24 100644 --- a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/text_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class TextDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -29,6 +31,7 @@ module OpenAI def text=(_) end + # The text content that is part of a message. sig do params(index: Integer, text: OpenAI::Models::Beta::Threads::TextDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/chat/chat_completion.rbi b/rbi/lib/openai/models/chat/chat_completion.rbi index 85749208..c68e3594 100644 --- a/rbi/lib/openai/models/chat/chat_completion.rbi +++ b/rbi/lib/openai/models/chat/chat_completion.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletion < OpenAI::BaseModel + # A unique identifier for the chat completion. sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # A list of chat completion choices. Can be more than one if `n` is greater + # than 1. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) } def choices end @@ -25,6 +28,7 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the chat completion was created. sig { returns(Integer) } def created end @@ -33,6 +37,7 @@ module OpenAI def created=(_) end + # The model used for the chat completion. sig { returns(String) } def model end @@ -41,6 +46,7 @@ module OpenAI def model=(_) end + # The object type, which is always `chat.completion`. sig { returns(Symbol) } def object end @@ -49,6 +55,7 @@ module OpenAI def object=(_) end + # The service tier used for processing the request. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -57,6 +64,10 @@ module OpenAI def service_tier=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } def system_fingerprint end @@ -65,6 +76,7 @@ module OpenAI def system_fingerprint=(_) end + # Usage statistics for the completion request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -73,6 +85,8 @@ module OpenAI def usage=(_) end + # Represents a chat completion response returned by model, based on the provided + # input. sig do params( id: String, @@ -117,6 +131,12 @@ module OpenAI end class Choice < OpenAI::BaseModel + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. sig { returns(Symbol) } def finish_reason end @@ -125,6 +145,7 @@ module OpenAI def finish_reason=(_) end + # The index of the choice in the list of choices. 
sig { returns(Integer) } def index end @@ -133,6 +154,7 @@ module OpenAI def index=(_) end + # Log probability information for the choice. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs)) } def logprobs end @@ -144,6 +166,7 @@ module OpenAI def logprobs=(_) end + # A chat completion message generated by the model. sig { returns(OpenAI::Models::Chat::ChatCompletionMessage) } def message end @@ -180,23 +203,26 @@ module OpenAI def to_hash end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. class FinishReason < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + STOP = :stop LENGTH = :length TOOL_CALLS = :tool_calls CONTENT_FILTER = :content_filter FUNCTION_CALL = :function_call - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Logprobs < OpenAI::BaseModel + # A list of message content tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def content end @@ -208,6 +234,7 @@ module OpenAI def content=(_) end + # A list of message refusal tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def refusal end @@ -219,6 +246,7 @@ module OpenAI def refusal=(_) end + # Log probability information for the choice. sig do params( content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), @@ -243,17 +271,14 @@ module OpenAI end end + # The service tier used for processing the request. class ServiceTier < OpenAI::Enum abstract! - SCALE = T.let(:scale, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + SCALE = :scale + DEFAULT = :default end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi index 10203986..624890ee 100644 --- a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionAssistantMessageParam < OpenAI::BaseModel + # The role of the messages author, in this case `assistant`. sig { returns(Symbol) } def role end @@ -14,6 +15,8 @@ module OpenAI def role=(_) end + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) } def audio end @@ -25,6 +28,8 @@ module OpenAI def audio=(_) end + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. sig do returns( T.nilable( @@ -74,6 +79,8 @@ module OpenAI def content=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. 
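The five `FinishReason` members above are raw symbols, so handling a completed choice is a symbol `case`. The `handle_*` helpers below are ours, and `choice` is assumed to be a `ChatCompletion::Choice`:

```ruby
case choice.finish_reason
when :stop           then handle_text(choice.message)          # natural stop or stop sequence
when :length         then warn "truncated: token limit reached"
when :content_filter then warn "content omitted by filter"
when :tool_calls     then handle_tool_calls(choice.message)
when :function_call  then handle_function_call(choice.message) # deprecated path
end
```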
sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) } def function_call end @@ -85,6 +92,8 @@ module OpenAI def function_call=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -93,6 +102,7 @@ module OpenAI def name=(_) end + # The refusal message by the assistant. sig { returns(T.nilable(String)) } def refusal end @@ -101,6 +111,7 @@ module OpenAI def refusal=(_) end + # The tool calls generated by the model, such as function calls. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } def tool_calls end @@ -112,6 +123,7 @@ module OpenAI def tool_calls=(_) end + # Messages sent by the model in response to user messages. sig do params( audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio), @@ -165,6 +177,7 @@ module OpenAI end class Audio < OpenAI::BaseModel + # Unique identifier for a previous audio response from the model. sig { returns(String) } def id end @@ -173,6 +186,8 @@ module OpenAI def id=(_) end + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { params(id: String).returns(T.attached_class) } def self.new(id:) end @@ -182,9 +197,25 @@ module OpenAI end end + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. class Content < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Chat::ChatCompletionContentPartText, + OpenAI::Models::Chat::ChatCompletionContentPartRefusal + ) + ] + ) + } + end + ArrayOfContentPartArray = T.type_alias do T::Array[ T.any( @@ -194,45 +225,27 @@ module OpenAI ] end + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). class ArrayOfContentPart < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionContentPartText], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartRefusal]] - ) - end - private def variants - end - end - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ] - ] + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Chat::ChatCompletionContentPartText, + OpenAI::Models::Chat::ChatCompletionContentPartRefusal ) - end - private def variants + } end end end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -241,6 +254,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -249,6 +263,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. 
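As the `arguments` comment above warns, the model may emit invalid JSON or hallucinated parameters, so parse defensively before dispatching. A plain-Ruby sketch; `function_call` is any object with the `arguments`/`name` accessors from this diff:

```ruby
require "json"

# Returns a Hash of arguments, or nil if the JSON is malformed or not an object.
def parse_arguments(raw)
  parsed = JSON.parse(raw)
  parsed.is_a?(Hash) ? parsed : nil
rescue JSON::ParserError
  nil
end

args = parse_arguments(function_call.arguments)
warn "model produced unusable arguments for #{function_call.name}" if args.nil?
```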
sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_audio.rbi index f8431d64..489f4145 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionAudio < OpenAI::BaseModel + # Unique identifier for this audio response. sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # Base64 encoded audio bytes generated by the model, in the format specified in + # the request. sig { returns(String) } def data end @@ -22,6 +25,8 @@ module OpenAI def data=(_) end + # The Unix timestamp (in seconds) for when this audio response will no longer be + # accessible on the server for use in multi-turn conversations. sig { returns(Integer) } def expires_at end @@ -30,6 +35,7 @@ module OpenAI def expires_at=(_) end + # Transcript of the audio generated by the model. sig { returns(String) } def transcript end @@ -38,6 +44,9 @@ module OpenAI def transcript=(_) end + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig do params(id: String, data: String, expires_at: Integer, transcript: String).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi index 0cbbdba5..0af4b8ee 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi @@ -6,6 +6,8 @@ module OpenAI module Chat class ChatCompletionAudioParam < OpenAI::BaseModel + # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + # or `pcm16`. sig { returns(Symbol) } def format_ end @@ -14,6 +16,8 @@ module OpenAI def format_=(_) end + # The voice the model uses to respond. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. sig { returns(Symbol) } def voice end @@ -22,6 +26,9 @@ module OpenAI def voice=(_) end + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { params(format_: Symbol, voice: Symbol).returns(T.attached_class) } def self.new(format_:, voice:) end @@ -30,25 +37,27 @@ module OpenAI def to_hash end + # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + # or `pcm16`. class Format < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + WAV = :wav MP3 = :mp3 FLAC = :flac OPUS = :opus PCM16 = :pcm16 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The voice the model uses to respond. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. class Voice < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + ALLOY = :alloy ASH = :ash BALLAD = :ballad @@ -57,12 +66,6 @@ module OpenAI SAGE = :sage SHIMMER = :shimmer VERSE = :verse - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi index 12f6d8b5..9595e84a 100644 --- a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionChunk < OpenAI::BaseModel + # A unique identifier for the chat completion. Each chunk has the same ID. sig { returns(String) } def id end @@ -14,6 +15,9 @@ module OpenAI def id=(_) end + # A list of chat completion choices. Can contain more than one element if `n` is + # greater than 1. Can also be empty for the last chunk if you set + # `stream_options: {"include_usage": true}`. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) } def choices end @@ -25,6 +29,8 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the chat completion was created. Each + # chunk has the same timestamp. sig { returns(Integer) } def created end @@ -33,6 +39,7 @@ module OpenAI def created=(_) end + # The model used to generate the completion. sig { returns(String) } def model end @@ -41,6 +48,7 @@ module OpenAI def model=(_) end + # The object type, which is always `chat.completion.chunk`. sig { returns(Symbol) } def object end @@ -49,6 +57,7 @@ module OpenAI def object=(_) end + # The service tier used for processing the request. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -57,6 +66,9 @@ module OpenAI def service_tier=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } def system_fingerprint end @@ -65,6 +77,10 @@ module OpenAI def system_fingerprint=(_) end + # An optional field that will only be present when you set + # `stream_options: {"include_usage": true}` in your request. When present, it + # contains a null value except for the last chunk which contains the token usage + # statistics for the entire request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -73,6 +89,9 @@ module OpenAI def usage=(_) end + # Represents a streamed chunk of a chat completion response returned by the model, + # based on the provided input. + # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). sig do params( id: String, @@ -117,6 +136,7 @@ module OpenAI end class Choice < OpenAI::BaseModel + # A chat completion delta generated by streamed model responses. sig { returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta) } def delta end @@ -128,6 +148,12 @@ module OpenAI def delta=(_) end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function.
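Given the `usage` semantics above — `nil` on every chunk except the final one when `stream_options: {"include_usage": true}` is set — collecting totals is one assignment per chunk. The enumerable `stream` of `ChatCompletionChunk`s is an assumption here, as is `total_tokens` on `CompletionUsage`:

```ruby
usage = nil
stream.each do |chunk|
  print chunk.choices.first&.delta&.content # choices may be empty on the last chunk
  usage = chunk.usage if chunk.usage        # non-nil only on the final chunk
end
puts "\ntotal tokens: #{usage&.total_tokens}"
```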
sig { returns(T.nilable(Symbol)) } def finish_reason end @@ -136,6 +162,7 @@ module OpenAI def finish_reason=(_) end + # The index of the choice in the list of choices. sig { returns(Integer) } def index end @@ -144,6 +171,7 @@ module OpenAI def index=(_) end + # Log probability information for the choice. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs)) } def logprobs end @@ -182,6 +210,7 @@ module OpenAI end class Delta < OpenAI::BaseModel + # The contents of the chunk message. sig { returns(T.nilable(String)) } def content end @@ -190,6 +219,8 @@ module OpenAI def content=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall)) } def function_call end @@ -201,6 +232,7 @@ module OpenAI def function_call=(_) end + # The refusal message generated by the model. sig { returns(T.nilable(String)) } def refusal end @@ -209,6 +241,7 @@ module OpenAI def refusal=(_) end + # The role of the author of this message. sig { returns(T.nilable(Symbol)) } def role end @@ -228,6 +261,7 @@ module OpenAI def tool_calls=(_) end + # A chat completion delta generated by streamed model responses. sig do params( content: T.nilable(String), @@ -257,6 +291,10 @@ module OpenAI end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } def arguments end @@ -265,6 +303,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(T.nilable(String)) } def name end @@ -273,6 +312,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments: nil, name: nil) end @@ -282,20 +323,17 @@ module OpenAI end end + # The role of the author of this message. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + DEVELOPER = :developer SYSTEM = :system USER = :user ASSISTANT = :assistant TOOL = :tool - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class ToolCall < OpenAI::BaseModel @@ -307,6 +345,7 @@ module OpenAI def index=(_) end + # The ID of the tool call. sig { returns(T.nilable(String)) } def id end @@ -326,6 +365,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(T.nilable(Symbol)) } def type end @@ -361,6 +401,10 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } def arguments end @@ -369,6 +413,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(T.nilable(String)) } def name end @@ -386,37 +431,37 @@ module OpenAI end end + # The type of the tool. 
Currently, only `function` is supported. class Type < OpenAI::Enum abstract! - FUNCTION = :function + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + FUNCTION = :function end end end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. class FinishReason < OpenAI::Enum abstract! - STOP = T.let(:stop, T.nilable(Symbol)) - LENGTH = T.let(:length, T.nilable(Symbol)) - TOOL_CALLS = T.let(:tool_calls, T.nilable(Symbol)) - CONTENT_FILTER = T.let(:content_filter, T.nilable(Symbol)) - FUNCTION_CALL = T.let(:function_call, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + STOP = :stop + LENGTH = :length + TOOL_CALLS = :tool_calls + CONTENT_FILTER = :content_filter + FUNCTION_CALL = :function_call end class Logprobs < OpenAI::BaseModel + # A list of message content tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def content end @@ -428,6 +473,7 @@ module OpenAI def content=(_) end + # A list of message refusal tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def refusal end @@ -439,6 +485,7 @@ module OpenAI def refusal=(_) end + # Log probability information for the choice. sig do params( content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), @@ -463,17 +510,14 @@ module OpenAI end end + # The service tier used for processing the request. class ServiceTier < OpenAI::Enum abstract! - SCALE = T.let(:scale, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + SCALE = :scale + DEFAULT = :default end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi index 02a241f0..a36f38f5 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi @@ -5,9 +5,22 @@ module OpenAI ChatCompletionContentPart = T.type_alias { Chat::ChatCompletionContentPart } module Chat + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). class ChatCompletionContentPart < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Chat::ChatCompletionContentPartText, + OpenAI::Models::Chat::ChatCompletionContentPartImage, + OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, + OpenAI::Models::Chat::ChatCompletionContentPart::File + ) + } + end + class File < OpenAI::BaseModel sig { returns(OpenAI::Models::Chat::ChatCompletionContentPart::File::File) } def file @@ -20,6 +33,7 @@ module OpenAI def file=(_) end + # The type of the content part. Always `file`. 
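The `File` part above carries either an uploaded `file_id` or inline `file_data` (base64) with a `file_name`. A construction sketch using the classes from this hunk; the kwargs constructor and the `type: :file` default are assumed from the pattern used elsewhere in this diff:

```ruby
part = OpenAI::Models::Chat::ChatCompletionContentPart::File.new(
  file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File.new(
    file_id: "file-abc123" # alternatively: file_data + file_name for inline content
  )
)
```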
sig { returns(Symbol) } def type end @@ -28,6 +42,8 @@ module OpenAI def type=(_) end + # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + # generation. sig do params(file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, type: Symbol) .returns(T.attached_class) @@ -40,6 +56,8 @@ module OpenAI end class File < OpenAI::BaseModel + # The base64 encoded file data, used when passing the file to the model as a + # string. sig { returns(T.nilable(String)) } def file_data end @@ -48,6 +66,7 @@ module OpenAI def file_data=(_) end + # The ID of an uploaded file to use as input. sig { returns(T.nilable(String)) } def file_id end @@ -56,6 +75,7 @@ module OpenAI def file_id=(_) end + # The name of the file, used when passing the file to the model as a string. sig { returns(T.nilable(String)) } def file_name end @@ -73,17 +93,6 @@ module OpenAI end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionContentPartText], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPart::File]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi index e74c6c80..56a6931a 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi @@ -17,6 +17,7 @@ module OpenAI def image_url=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). sig do params(image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, type: Symbol) .returns(T.attached_class) @@ -39,6 +41,7 @@ module OpenAI end class ImageURL < OpenAI::BaseModel + # Either a URL of the image or the base64 encoded image data. sig { returns(String) } def url end @@ -47,6 +50,8 @@ module OpenAI def url=(_) end + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). sig { returns(T.nilable(Symbol)) } def detail end @@ -63,18 +68,16 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). class Detail < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto LOW = :low HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi index bff9cc3b..83e891b0 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi @@ -17,6 +17,7 @@ module OpenAI def input_audio=(_) end + # The type of the content part. Always `input_audio`. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). 
sig do params(input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, type: Symbol) .returns(T.attached_class) @@ -42,6 +44,7 @@ module OpenAI end class InputAudio < OpenAI::BaseModel + # Base64 encoded audio data. sig { returns(String) } def data end @@ -50,6 +53,7 @@ module OpenAI def data=(_) end + # The format of the encoded audio data. Currently supports "wav" and "mp3". sig { returns(Symbol) } def format_ end @@ -66,17 +70,14 @@ module OpenAI def to_hash end + # The format of the encoded audio data. Currently supports "wav" and "mp3". class Format < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + WAV = :wav MP3 = :mp3 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi index 263c9c0b..392ce584 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionContentPartRefusal < OpenAI::BaseModel + # The refusal message generated by the model. sig { returns(String) } def refusal end @@ -14,6 +15,7 @@ module OpenAI def refusal=(_) end + # The type of the content part. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi index 84a24c5d..1154a72d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionContentPartText < OpenAI::BaseModel + # The text content. sig { returns(String) } def text end @@ -14,6 +15,7 @@ module OpenAI def text=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -22,6 +24,8 @@ module OpenAI def type=(_) end + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi b/rbi/lib/openai/models/chat/chat_completion_deleted.rbi index 763165dc..72764c05 100644 --- a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_deleted.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionDeleted < OpenAI::BaseModel + # The ID of the chat completion that was deleted. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # Whether the chat completion was deleted. sig { returns(T::Boolean) } def deleted end @@ -22,6 +24,7 @@ module OpenAI def deleted=(_) end + # The type of object being deleted. sig { returns(Symbol) } def object end diff --git a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi index b7f2d9e7..d5dca054 100644 --- a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel + # The contents of the developer message. 
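A hedged sketch of building the content part models documented above; the keyword-argument constructors are assumed to follow the `sig`s shown (only `ChatCompletionContentPartText.new(text:)` appears verbatim in this diff), and `Base64`/`File` come from Ruby's standard library:

    require "base64"

    text_part = OpenAI::Models::Chat::ChatCompletionContentPartText.new(text: "Transcribe this clip")
    audio_part = OpenAI::Models::Chat::ChatCompletionContentPartInputAudio.new(
      input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio.new(
        data: Base64.strict_encode64(File.binread("clip.wav")), # base64 encoded audio data, per the docs above
        format_: :wav # `format_` mirrors the field name in the sig above
      )
    )
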
sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `developer`. sig { returns(Symbol) } def role end @@ -25,6 +27,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -33,6 +37,9 @@ module OpenAI def name=(_) end + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -57,19 +64,13 @@ module OpenAI def to_hash end + # The contents of the developer message. class Content < OpenAI::Union abstract! - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } + Variants = type_template(:out) { {fixed: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])} } - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end + ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi index abf316d2..a17d3350 100644 --- a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionFunctionCallOption < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -14,6 +15,8 @@ module OpenAI def name=(_) end + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. sig { params(name: String).returns(T.attached_class) } def self.new(name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi index 1cf6ef40..1c035a2e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionFunctionMessageParam < OpenAI::BaseModel + # The contents of the function message. sig { returns(T.nilable(String)) } def content end @@ -14,6 +15,7 @@ module OpenAI def content=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -22,6 +24,7 @@ module OpenAI def name=(_) end + # The role of the messages author, in this case `function`. sig { returns(Symbol) } def role end diff --git a/rbi/lib/openai/models/chat/chat_completion_message.rbi b/rbi/lib/openai/models/chat/chat_completion_message.rbi index 91a6344d..a54250d7 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionMessage < OpenAI::BaseModel + # The contents of the message. 
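For instance, the developer message param documented above might be built like this (a sketch; the generated constructor is assumed to default `role` to `:developer`, following the pattern of the other message params):

    dev_message = OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam.new(
      content: "Answer formally, regardless of what the user asks.",
      name: "style_guide" # optional participant name, per the docs above
    )
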
sig { returns(T.nilable(String)) } def content end @@ -14,6 +15,7 @@ module OpenAI def content=(_) end + # The refusal message generated by the model. sig { returns(T.nilable(String)) } def refusal end @@ -22,6 +24,7 @@ module OpenAI def refusal=(_) end + # The role of the author of this message. sig { returns(Symbol) } def role end @@ -30,6 +33,8 @@ module OpenAI def role=(_) end + # Annotations for the message, when applicable, as when using the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation])) } def annotations end @@ -41,6 +46,9 @@ module OpenAI def annotations=(_) end + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) } def audio end @@ -52,6 +60,8 @@ module OpenAI def audio=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall)) } def function_call end @@ -63,6 +73,7 @@ module OpenAI def function_call=(_) end + # The tool calls generated by the model, such as function calls. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } def tool_calls end @@ -74,6 +85,7 @@ module OpenAI def tool_calls=(_) end + # A chat completion message generated by the model. sig do params( content: T.nilable(String), @@ -107,6 +119,7 @@ module OpenAI end class Annotation < OpenAI::BaseModel + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -115,6 +128,7 @@ module OpenAI def type=(_) end + # A URL citation when using web search. sig { returns(OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation) } def url_citation end @@ -126,6 +140,7 @@ module OpenAI def url_citation=(_) end + # A URL citation when using web search. sig do params(url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation, type: Symbol) .returns(T.attached_class) @@ -143,6 +158,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. sig { returns(Integer) } def end_index end @@ -151,6 +167,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -159,6 +176,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -167,6 +185,7 @@ module OpenAI def title=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -175,6 +194,7 @@ module OpenAI def url=(_) end + # A URL citation when using web search. sig do params( end_index: Integer, @@ -193,6 +213,10 @@ module OpenAI end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -201,6 +225,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. 
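A small sketch of reading the URL citations documented above, assuming `message` is an `OpenAI::Models::Chat::ChatCompletionMessage` returned with the web search tool enabled:

    (message.annotations || []).each do |annotation|
      citation = annotation.url_citation
      puts "#{citation.title} (#{citation.url}), chars #{citation.start_index}..#{citation.end_index}"
    end
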
sig { returns(String) } def name end @@ -209,6 +234,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi index 471f5f89..97d8192e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi @@ -5,18 +5,23 @@ module OpenAI ChatCompletionMessageParam = T.type_alias { Chat::ChatCompletionMessageParam } module Chat + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. class ChatCompletionMessageParam < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionSystemMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionUserMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionToolMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, + OpenAI::Models::Chat::ChatCompletionSystemMessageParam, + OpenAI::Models::Chat::ChatCompletionUserMessageParam, + OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, + OpenAI::Models::Chat::ChatCompletionToolMessageParam, + OpenAI::Models::Chat::ChatCompletionFunctionMessageParam + ) + } end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi index ca4c6ac0..fadd4257 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionMessageToolCall < OpenAI::BaseModel + # The ID of the tool call. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The function that the model called. sig { returns(OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function) } def function end @@ -25,6 +27,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end @@ -50,6 +53,10 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -58,6 +65,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -66,6 +74,7 @@ module OpenAI def name=(_) end + # The function that the model called. 
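Since `function_call` is deprecated in favor of `tool_calls`, a consuming loop might look like the following sketch; `handle_tool` is a hypothetical application helper, and the rescue reflects the warning above that model-generated arguments may not be valid JSON:

    require "json"

    (message.tool_calls || []).each do |call|
      args = begin
        JSON.parse(call.function.arguments)
      rescue JSON::ParserError
        next # model output is not guaranteed to be valid JSON; validate before use
      end
      handle_tool(call.id, call.function.name, args) # `handle_tool` is a hypothetical dispatch helper
    end
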
sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_modality.rbi b/rbi/lib/openai/models/chat/chat_completion_modality.rbi index bb95e5c8..df9e0044 100644 --- a/rbi/lib/openai/models/chat/chat_completion_modality.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_modality.rbi @@ -8,14 +8,10 @@ module OpenAI class ChatCompletionModality < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text AUDIO = :audio - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi index 89db6837..d1d23deb 100644 --- a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi @@ -17,6 +17,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end @@ -25,6 +26,8 @@ module OpenAI def type=(_) end + # Specifies a tool the model should use. Use to force the model to call a specific + # function. sig do params(function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, type: Symbol) .returns(T.attached_class) @@ -37,6 +40,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end diff --git a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi index 279b9fbe..f595e430 100644 --- a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi @@ -6,6 +6,9 @@ module OpenAI module Chat class ChatCompletionPredictionContent < OpenAI::BaseModel + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +20,8 @@ module OpenAI def content=(_) end + # The type of the predicted content you want to provide. This type is currently + # always `content`. sig { returns(Symbol) } def type end @@ -25,6 +30,8 @@ module OpenAI def type=(_) end + # Static predicted output content, such as the content of a text file that is + # being regenerated. sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -44,19 +51,15 @@ module OpenAI def to_hash end + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. class Content < OpenAI::Union abstract! 
- ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } + Variants = type_template(:out) { {fixed: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])} } - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end + ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_role.rbi b/rbi/lib/openai/models/chat/chat_completion_role.rbi index 807d6735..ae42bac3 100644 --- a/rbi/lib/openai/models/chat/chat_completion_role.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_role.rbi @@ -5,21 +5,18 @@ module OpenAI ChatCompletionRole = T.type_alias { Chat::ChatCompletionRole } module Chat + # The role of the author of a message class ChatCompletionRole < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + DEVELOPER = :developer SYSTEM = :system USER = :user ASSISTANT = :assistant TOOL = :tool FUNCTION = :function - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi b/rbi/lib/openai/models/chat/chat_completion_store_message.rbi index 0c487bdc..a63c1c01 100644 --- a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_store_message.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage + # The identifier of the chat message. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # A chat completion message generated by the model. sig { params(id: String).returns(T.attached_class) } def self.new(id:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi index 88104415..9fbf5879 100644 --- a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi @@ -6,6 +6,10 @@ module OpenAI module Chat class ChatCompletionStreamOptions < OpenAI::BaseModel + # If set, an additional chunk will be streamed before the `data: [DONE]` message. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. All other chunks + # will also include a `usage` field, but with a null value. sig { returns(T.nilable(T::Boolean)) } def include_usage end @@ -14,6 +18,7 @@ module OpenAI def include_usage=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { params(include_usage: T::Boolean).returns(T.attached_class) } def self.new(include_usage: nil) end diff --git a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi index 4c4e8def..1863c0f8 100644 --- a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionSystemMessageParam < OpenAI::BaseModel + # The contents of the system message. 
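As a sketch of the streaming options documented above (the request call that would carry them is not part of this diff):

    stream_options = OpenAI::Models::Chat::ChatCompletionStreamOptions.new(include_usage: true)
    # Per the docs above, the final chunk before `data: [DONE]` then carries a
    # populated `usage`, while every other chunk carries `usage: nil`.
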
sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `system`. sig { returns(Symbol) } def role end @@ -25,6 +27,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -33,6 +37,9 @@ module OpenAI def name=(_) end + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, use `developer` messages + # for this purpose instead. sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -57,19 +64,13 @@ module OpenAI def to_hash end + # The contents of the system message. class Content < OpenAI::Union abstract! - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } + Variants = type_template(:out) { {fixed: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])} } - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end + ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi index c280c13e..2341c139 100644 --- a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionTokenLogprob < OpenAI::BaseModel + # The token. sig { returns(String) } def token end @@ -14,6 +15,10 @@ module OpenAI def token=(_) end + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. sig { returns(T.nilable(T::Array[Integer])) } def bytes end @@ -22,6 +27,9 @@ module OpenAI def bytes=(_) end + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. sig { returns(Float) } def logprob end @@ -30,6 +38,9 @@ module OpenAI def logprob=(_) end + # List of the most likely tokens and their log probability, at this token + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) } def top_logprobs end @@ -68,6 +79,7 @@ module OpenAI end class TopLogprob < OpenAI::BaseModel + # The token. sig { returns(String) } def token end @@ -76,6 +88,10 @@ module OpenAI def token=(_) end + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. 
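The log probabilities above convert back to plain probabilities with `Math.exp`; a sketch, assuming `entry` is one of the `ChatCompletionTokenLogprob` values:

    probability = entry.logprob <= -9999.0 ? 0.0 : Math.exp(entry.logprob) # -9999.0 is the documented sentinel
    token_text  = entry.bytes ? entry.bytes.pack("C*").force_encoding("UTF-8") : entry.token
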
sig { returns(T.nilable(T::Array[Integer])) } def bytes end @@ -84,6 +100,9 @@ module OpenAI def bytes=(_) end + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. sig { returns(Float) } def logprob end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool.rbi b/rbi/lib/openai/models/chat/chat_completion_tool.rbi index 363528f2..5a41a09d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool.rbi @@ -14,6 +14,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi index 3d7a6aea..0c237bac 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi @@ -5,27 +5,31 @@ module OpenAI ChatCompletionToolChoiceOption = T.type_alias { Chat::ChatCompletionToolChoiceOption } module Chat + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. class ChatCompletionToolChoiceOption < OpenAI::Union abstract! + Variants = type_template(:out) { {fixed: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)} } + + # `none` means the model will not call any tool and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. class Auto < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + NONE = :none AUTO = :auto REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]]) } - private def variants - end end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi index e320aa6c..20936406 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionToolMessageParam < OpenAI::BaseModel + # The contents of the tool message. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `tool`. sig { returns(Symbol) } def role end @@ -25,6 +27,7 @@ module OpenAI def role=(_) end + # Tool call that this message is responding to. sig { returns(String) } def tool_call_id end @@ -57,19 +60,13 @@ module OpenAI def to_hash end + # The contents of the tool message. class Content < OpenAI::Union abstract! 
- ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } + Variants = type_template(:out) { {fixed: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])} } - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end + ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } end end end diff --git a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi index a41ad724..7ebaa424 100644 --- a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionUserMessageParam < OpenAI::BaseModel + # The contents of the user message. sig do returns( T.any( @@ -55,6 +56,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `user`. sig { returns(Symbol) } def role end @@ -63,6 +65,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -71,6 +75,8 @@ module OpenAI def name=(_) end + # Messages sent by an end user, containing prompts or additional context + # information. sig do params( content: T.any( @@ -115,9 +121,26 @@ module OpenAI def to_hash end + # The contents of the user message. class Content < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[ + T.any( + OpenAI::Models::Chat::ChatCompletionContentPartText, + OpenAI::Models::Chat::ChatCompletionContentPartImage, + OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, + OpenAI::Models::Chat::ChatCompletionContentPart::File + ) + ] + ) + } + end + ChatCompletionContentPartArray = T.type_alias do T::Array[ T.any( @@ -128,30 +151,6 @@ module OpenAI ) ] end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ] - ] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/chat/completion_create_params.rbi b/rbi/lib/openai/models/chat/completion_create_params.rbi index 77aab350..7b69e065 100644 --- a/rbi/lib/openai/models/chat/completion_create_params.rbi +++ b/rbi/lib/openai/models/chat/completion_create_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). sig do returns( T::Array[ @@ -53,6 +59,11 @@ module OpenAI def messages=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. 
OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -61,6 +72,9 @@ module OpenAI def model=(_) end + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) } def audio end @@ -72,6 +86,9 @@ module OpenAI def audio=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. sig { returns(T.nilable(Float)) } def frequency_penalty end @@ -80,6 +97,20 @@ module OpenAI def frequency_penalty=(_) end + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption))) } def function_call end @@ -91,6 +122,9 @@ module OpenAI def function_call=(_) end + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function])) } def functions end @@ -102,6 +136,14 @@ module OpenAI def functions=(_) end + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } def logit_bias end @@ -110,6 +152,9 @@ module OpenAI def logit_bias=(_) end + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. sig { returns(T.nilable(T::Boolean)) } def logprobs end @@ -118,6 +163,9 @@ module OpenAI def logprobs=(_) end + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -126,6 +174,13 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. 
+ # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_tokens end @@ -134,6 +189,12 @@ module OpenAI def max_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -142,6 +203,16 @@ module OpenAI def metadata=(_) end + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` sig { returns(T.nilable(T::Array[Symbol])) } def modalities end @@ -150,6 +221,9 @@ module OpenAI def modalities=(_) end + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. sig { returns(T.nilable(Integer)) } def n end @@ -158,6 +232,9 @@ module OpenAI def n=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -166,6 +243,8 @@ module OpenAI def parallel_tool_calls=(_) end + # Static predicted output content, such as the content of a text file that is + # being regenerated. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) } def prediction end @@ -177,6 +256,9 @@ module OpenAI def prediction=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. sig { returns(T.nilable(Float)) } def presence_penalty end @@ -185,6 +267,12 @@ module OpenAI def presence_penalty=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -193,6 +281,16 @@ module OpenAI def reasoning_effort=(_) end + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. 
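A hedged sketch of the `response_format` setting described above, using the JSON mode variant because the full `ResponseFormatJSONSchema` constructor is not shown in this diff (a zero-argument `new` is assumed, with `type` defaulting to `:json_object`):

    response_format = OpenAI::Models::ResponseFormatJSONObject.new
    # Where supported, `ResponseFormatJSONSchema` with a supplied schema is
    # preferred, per the docs above.
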
sig do returns( T.nilable( T.any( OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject ) ) ) end def response_format end @@ -226,6 +324,11 @@ module OpenAI def response_format=(_) end + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. sig { returns(T.nilable(Integer)) } def seed end @@ -234,6 +337,20 @@ module OpenAI def seed=(_) end + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -242,6 +359,8 @@ module OpenAI def service_tier=(_) end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. sig { returns(T.nilable(T.any(String, T::Array[String]))) } def stop end @@ -259,6 +378,9 @@ module OpenAI def stop=(_) end + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. sig { returns(T.nilable(T::Boolean)) } def store end @@ -267,6 +389,7 @@ module OpenAI def store=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } def stream_options end @@ -278,6 +401,10 @@ module OpenAI def stream_options=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } def temperature end @@ -286,6 +413,15 @@ module OpenAI def temperature=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice))) } def tool_choice end @@ -297,6 +433,9 @@ module OpenAI def tool_choice=(_) end + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for.
A max of 128 functions are supported. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTool])) } def tools end @@ -308,6 +447,9 @@ module OpenAI def tools=(_) end + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. sig { returns(T.nilable(Integer)) } def top_logprobs end @@ -316,6 +458,11 @@ module OpenAI def top_logprobs=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -324,6 +471,9 @@ module OpenAI def top_p=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -332,6 +482,9 @@ module OpenAI def user=(_) end + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions)) } def web_search_options end @@ -481,40 +634,52 @@ module OpenAI def to_hash end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } + end + + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. class FunctionCall < OpenAI::Union abstract! + Variants = type_template(:out) { {fixed: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)} } + + # `none` means the model will not call a function and instead generates a message. + # `auto` means the model can pick between generating a message or calling a + # function. class FunctionCallMode < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + NONE = :none AUTO = :auto - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]]) } - private def variants - end end end class Function < OpenAI::BaseModel + # The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain + # underscores and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -523,6 +688,8 @@ module OpenAI def name=(_) end + # A description of what the function does, used by the model to choose when and + # how to call the function. sig { returns(T.nilable(String)) } def description end @@ -531,6 +698,13 @@ module OpenAI def description=(_) end + # The parameters the functions accepts, described as a JSON Schema object. See the + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. + # + # Omitting `parameters` defines a function with an empty parameter list. sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } def parameters end @@ -554,57 +728,72 @@ module OpenAI class Modality < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text AUDIO = :audio - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. class ResponseFormat < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::ResponseFormatText], [NilClass, OpenAI::Models::ResponseFormatJSONSchema], [NilClass, OpenAI::Models::ResponseFormatJSONObject]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::ResponseFormatText, + OpenAI::Models::ResponseFormatJSONSchema, + OpenAI::Models::ResponseFormatJSONObject + ) + } end end + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. class ServiceTier < OpenAI::Enum abstract! - AUTO = T.let(:auto, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + AUTO = :auto + DEFAULT = :default end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. class Stop < OpenAI::Union abstract!
- StringArray = T.type_alias { T::Array[String] } + Variants = type_template(:out) { {fixed: T.nilable(T.any(String, T::Array[String]))} } - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end + StringArray = T.type_alias { T::Array[String] } end class WebSearchOptions < OpenAI::BaseModel + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. sig { returns(T.nilable(Symbol)) } def search_context_size end @@ -613,6 +802,7 @@ module OpenAI def search_context_size=(_) end + # Approximate location parameters for the search. sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation)) } def user_location end @@ -624,6 +814,9 @@ module OpenAI def user_location=(_) end + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig do params( search_context_size: Symbol, @@ -646,21 +839,20 @@ module OpenAI def to_hash end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. class SearchContextSize < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + LOW = :low MEDIUM = :medium HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class UserLocation < OpenAI::BaseModel + # Approximate location parameters for the search. sig { returns(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate) } def approximate end @@ -672,6 +864,7 @@ module OpenAI def approximate=(_) end + # The type of location approximation. Always `approximate`. sig { returns(Symbol) } def type end @@ -680,6 +873,7 @@ module OpenAI def type=(_) end + # Approximate location parameters for the search. sig do params( approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, @@ -703,6 +897,7 @@ module OpenAI end class Approximate < OpenAI::BaseModel + # Free text input for the city of the user, e.g. `San Francisco`. sig { returns(T.nilable(String)) } def city end @@ -711,6 +906,8 @@ module OpenAI def city=(_) end + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. sig { returns(T.nilable(String)) } def country end @@ -719,6 +916,7 @@ module OpenAI def country=(_) end + # Free text input for the region of the user, e.g. `California`. sig { returns(T.nilable(String)) } def region end @@ -727,6 +925,8 @@ module OpenAI def region=(_) end + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } def timezone end @@ -735,6 +935,7 @@ module OpenAI def timezone=(_) end + # Approximate location parameters for the search. sig do params( city: String, diff --git a/rbi/lib/openai/models/chat/completion_list_params.rbi b/rbi/lib/openai/models/chat/completion_list_params.rbi index 224d64b7..45a53c74 100644 --- a/rbi/lib/openai/models/chat/completion_list_params.rbi +++ b/rbi/lib/openai/models/chat/completion_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last chat completion from the previous pagination request. 
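A pagination sketch built on the cursor params documented above; the `client.chat.completions.list` call and its page shape are assumed, since the resource classes are not part of this diff:

    first_page = client.chat.completions.list(limit: 20, order: :desc)
    # Feed the last completion's ID back as `after` to request the next page.
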
sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of Chat Completions to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -23,6 +25,9 @@ module OpenAI def limit=(_) end + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -31,6 +36,7 @@ module OpenAI def metadata=(_) end + # The model used to generate the Chat Completions. sig { returns(T.nilable(String)) } def model end @@ -39,6 +45,8 @@ module OpenAI def model=(_) end + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. sig { returns(T.nilable(Symbol)) } def order end @@ -77,17 +85,15 @@ module OpenAI def to_hash end + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat/completion_update_params.rbi b/rbi/lib/openai/models/chat/completion_update_params.rbi index 7c557df2..0b3aa56f 100644 --- a/rbi/lib/openai/models/chat/completion_update_params.rbi +++ b/rbi/lib/openai/models/chat/completion_update_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/chat/completions/message_list_params.rbi b/rbi/lib/openai/models/chat/completions/message_list_params.rbi index b5474b6c..014bdbc9 100644 --- a/rbi/lib/openai/models/chat/completions/message_list_params.rbi +++ b/rbi/lib/openai/models/chat/completions/message_list_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last message from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -16,6 +17,7 @@ module OpenAI def after=(_) end + # Number of messages to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -24,6 +26,8 @@ module OpenAI def limit=(_) end + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. sig { returns(T.nilable(Symbol)) } def order end @@ -57,17 +61,15 @@ module OpenAI def to_hash end + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. class Order < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/chat_model.rbi b/rbi/lib/openai/models/chat_model.rbi index 5120ba73..acb06a2d 100644 --- a/rbi/lib/openai/models/chat_model.rbi +++ b/rbi/lib/openai/models/chat_model.rbi @@ -5,6 +5,8 @@ module OpenAI class ChatModel < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + O3_MINI = :"o3-mini" O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" O1 = :o1 @@ -49,12 +51,6 @@ module OpenAI GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106" GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/comparison_filter.rbi b/rbi/lib/openai/models/comparison_filter.rbi index a44961a9..bbb84c5d 100644 --- a/rbi/lib/openai/models/comparison_filter.rbi +++ b/rbi/lib/openai/models/comparison_filter.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ComparisonFilter < OpenAI::BaseModel + # The key to compare against the value. sig { returns(String) } def key end @@ -11,6 +12,14 @@ module OpenAI def key=(_) end + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal sig { returns(Symbol) } def type end @@ -19,6 +28,8 @@ module OpenAI def type=(_) end + # The value to compare against the attribute key; supports string, number, or + # boolean types. sig { returns(T.any(String, Float, T::Boolean)) } def value end @@ -27,6 +38,8 @@ module OpenAI def value=(_) end + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. sig do params(key: String, type: Symbol, value: T.any(String, Float, T::Boolean)).returns(T.attached_class) end @@ -37,31 +50,33 @@ module OpenAI def to_hash end + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + EQ = :eq NE = :ne GT = :gt GTE = :gte LT = :lt LTE = :lte - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The value to compare against the attribute key; supports string, number, or + # boolean types. class Value < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/completion.rbi b/rbi/lib/openai/models/completion.rbi index 53205098..5c9f6e01 100644 --- a/rbi/lib/openai/models/completion.rbi +++ b/rbi/lib/openai/models/completion.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Completion < OpenAI::BaseModel + # A unique identifier for the completion. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The list of completion choices the model generated for the input prompt. 
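For reference, the `ComparisonFilter` documented above reduces to a small hash shape at the call site. A hedged sketch with invented values:

filter = {
  key: "region",    # the attribute key to compare against
  type: :eq,        # one of :eq, :ne, :gt, :gte, :lt, :lte
  value: "us-east"  # String, Float, or Boolean, per the Value union above
}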
sig { returns(T::Array[OpenAI::Models::CompletionChoice]) } def choices end @@ -19,6 +21,7 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the completion was created. sig { returns(Integer) } def created end @@ -27,6 +30,7 @@ module OpenAI def created=(_) end + # The model used for completion. sig { returns(String) } def model end @@ -35,6 +39,7 @@ module OpenAI def model=(_) end + # The object type, which is always "text_completion" sig { returns(Symbol) } def object end @@ -43,6 +48,10 @@ module OpenAI def object=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } def system_fingerprint end @@ -51,6 +60,7 @@ module OpenAI def system_fingerprint=(_) end + # Usage statistics for the completion request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -59,6 +69,8 @@ module OpenAI def usage=(_) end + # Represents a completion response from the API. Note: both the streamed and + # non-streamed response objects share the same shape (unlike the chat endpoint). sig do params( id: String, diff --git a/rbi/lib/openai/models/completion_choice.rbi b/rbi/lib/openai/models/completion_choice.rbi index 0e80d12b..93b41ebf 100644 --- a/rbi/lib/openai/models/completion_choice.rbi +++ b/rbi/lib/openai/models/completion_choice.rbi @@ -3,6 +3,10 @@ module OpenAI module Models class CompletionChoice < OpenAI::BaseModel + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. sig { returns(Symbol) } def finish_reason end @@ -64,18 +68,18 @@ module OpenAI def to_hash end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. class FinishReason < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + STOP = :stop LENGTH = :length CONTENT_FILTER = :content_filter - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Logprobs < OpenAI::BaseModel diff --git a/rbi/lib/openai/models/completion_create_params.rbi b/rbi/lib/openai/models/completion_create_params.rbi index dc1a4be1..2e1185c5 100644 --- a/rbi/lib/openai/models/completion_create_params.rbi +++ b/rbi/lib/openai/models/completion_create_params.rbi @@ -6,6 +6,11 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, Symbol)) } def model end @@ -14,6 +19,12 @@ module OpenAI def model=(_) end + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. 
+ # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. sig do returns(T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) end def prompt end @@ -27,6 +38,15 @@ module OpenAI def prompt=(_) end + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. sig { returns(T.nilable(Integer)) } def best_of end @@ -35,6 +55,7 @@ module OpenAI def best_of=(_) end + # Echo back the prompt in addition to the completion. sig { returns(T.nilable(T::Boolean)) } def echo end @@ -43,6 +64,11 @@ module OpenAI def echo=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) sig { returns(T.nilable(Float)) } def frequency_penalty end @@ -51,6 +77,18 @@ module OpenAI def frequency_penalty=(_) end + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. + # + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } def logit_bias end @@ -59,6 +97,12 @@ module OpenAI def logit_bias=(_) end + # Include the log probabilities on the `logprobs` most likely output tokens, as + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. sig { returns(T.nilable(Integer)) } def logprobs end @@ -67,6 +111,13 @@ module OpenAI def logprobs=(_) end + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. sig { returns(T.nilable(Integer)) } def max_tokens end @@ -75,6 +126,11 @@ module OpenAI def max_tokens=(_) end + # How many completions to generate for each prompt.
+ # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. sig { returns(T.nilable(Integer)) } def n end @@ -83,6 +139,11 @@ module OpenAI def n=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) sig { returns(T.nilable(Float)) } def presence_penalty end @@ -91,6 +152,12 @@ module OpenAI def presence_penalty=(_) end + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. sig { returns(T.nilable(Integer)) } def seed end @@ -99,6 +166,8 @@ module OpenAI def seed=(_) end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. sig { returns(T.nilable(T.any(String, T::Array[String]))) } def stop end @@ -116,6 +185,7 @@ module OpenAI def stop=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } def stream_options end @@ -127,6 +197,9 @@ module OpenAI def stream_options=(_) end + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. sig { returns(T.nilable(String)) } def suffix end @@ -135,6 +208,11 @@ module OpenAI def suffix=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. sig { returns(T.nilable(Float)) } def temperature end @@ -143,6 +221,11 @@ module OpenAI def temperature=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -151,6 +234,9 @@ module OpenAI def top_p=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -246,66 +332,45 @@ module OpenAI def to_hash end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! - class Preset < OpenAI::Enum - abstract! 
- - GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct" - DAVINCI_002 = :"davinci-002" - BABBAGE_002 = :"babbage-002" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. class Prompt < OpenAI::Union abstract! + Variants = type_template(:out) do + {fixed: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])} + end + StringArray = T.type_alias { T::Array[String] } IntegerArray = T.type_alias { T::Array[Integer] } ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [NilClass, T::Array[String]], - [NilClass, T::Array[Integer]], - [NilClass, T::Array[T::Array[Integer]]] - ] - ) - end - private def variants - end - end end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. class Stop < OpenAI::Union abstract! - StringArray = T.type_alias { T::Array[String] } + Variants = type_template(:out) { {fixed: T.nilable(T.any(String, T::Array[String]))} } - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end + StringArray = T.type_alias { T::Array[String] } end end end diff --git a/rbi/lib/openai/models/completion_usage.rbi b/rbi/lib/openai/models/completion_usage.rbi index 36ebba0c..de91da0f 100644 --- a/rbi/lib/openai/models/completion_usage.rbi +++ b/rbi/lib/openai/models/completion_usage.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class CompletionUsage < OpenAI::BaseModel + # Number of tokens in the generated completion. sig { returns(Integer) } def completion_tokens end @@ -11,6 +12,7 @@ module OpenAI def completion_tokens=(_) end + # Number of tokens in the prompt. sig { returns(Integer) } def prompt_tokens end @@ -19,6 +21,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used in the request (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -27,6 +30,7 @@ module OpenAI def total_tokens=(_) end + # Breakdown of tokens used in a completion. sig { returns(T.nilable(OpenAI::Models::CompletionUsage::CompletionTokensDetails)) } def completion_tokens_details end @@ -38,6 +42,7 @@ module OpenAI def completion_tokens_details=(_) end + # Breakdown of tokens used in the prompt. sig { returns(T.nilable(OpenAI::Models::CompletionUsage::PromptTokensDetails)) } def prompt_tokens_details end @@ -49,6 +54,7 @@ module OpenAI def prompt_tokens_details=(_) end + # Usage statistics for the completion request. sig do params( completion_tokens: Integer, @@ -84,6 +90,8 @@ module OpenAI end class CompletionTokensDetails < OpenAI::BaseModel + # When using Predicted Outputs, the number of tokens in the prediction that + # appeared in the completion. sig { returns(T.nilable(Integer)) } def accepted_prediction_tokens end @@ -92,6 +100,7 @@ module OpenAI def accepted_prediction_tokens=(_) end + # Audio input tokens generated by the model. 
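Taken together, the `CompletionCreateParams` doc strings above describe the legacy completions call. A hedged usage sketch, assuming the `OpenAI::Client` entry point this SDK exposes (prompt text and parameter values are illustrative):

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

completion = client.completions.create(
  model: "gpt-3.5-turbo-instruct",
  prompt: "Write a haiku about static typing.", # a String, or one of the array forms above
  max_tokens: 32,   # prompt tokens + max_tokens must fit in the model's context length
  temperature: 0.7, # alter this or top_p, not both
  stop: ["\n\n"]    # up to 4 stop sequences; the returned text excludes them
)

puts completion.choices.first.text # assuming the `text` field on CompletionChoice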
sig { returns(T.nilable(Integer)) } def audio_tokens end @@ -100,6 +109,7 @@ module OpenAI def audio_tokens=(_) end + # Tokens generated by the model for reasoning. sig { returns(T.nilable(Integer)) } def reasoning_tokens end @@ -108,6 +118,10 @@ module OpenAI def reasoning_tokens=(_) end + # When using Predicted Outputs, the number of tokens in the prediction that did + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. sig { returns(T.nilable(Integer)) } def rejected_prediction_tokens end @@ -116,6 +130,7 @@ module OpenAI def rejected_prediction_tokens=(_) end + # Breakdown of tokens used in a completion. sig do params( accepted_prediction_tokens: Integer, @@ -144,6 +159,7 @@ module OpenAI end class PromptTokensDetails < OpenAI::BaseModel + # Audio input tokens present in the prompt. sig { returns(T.nilable(Integer)) } def audio_tokens end @@ -152,6 +168,7 @@ module OpenAI def audio_tokens=(_) end + # Cached tokens present in the prompt. sig { returns(T.nilable(Integer)) } def cached_tokens end @@ -160,6 +177,7 @@ module OpenAI def cached_tokens=(_) end + # Breakdown of tokens used in the prompt. sig { params(audio_tokens: Integer, cached_tokens: Integer).returns(T.attached_class) } def self.new(audio_tokens: nil, cached_tokens: nil) end diff --git a/rbi/lib/openai/models/compound_filter.rbi b/rbi/lib/openai/models/compound_filter.rbi index e8e64bd4..6722f10d 100644 --- a/rbi/lib/openai/models/compound_filter.rbi +++ b/rbi/lib/openai/models/compound_filter.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class CompoundFilter < OpenAI::BaseModel + # Array of filters to combine. Items can be `ComparisonFilter` or + # `CompoundFilter`. sig { returns(T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) } def filters end @@ -14,6 +16,7 @@ module OpenAI def filters=(_) end + # Type of operation: `and` or `or`. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # Combine multiple filters using `and` or `or`. sig do params(filters: T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)], type: Symbol) .returns(T.attached_class) @@ -33,27 +37,22 @@ module OpenAI def to_hash end + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. class Filter < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, T.anything]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::ComparisonFilter, T.anything)} } end + # Type of operation: `and` or `or`. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + AND = :and OR = :or - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/create_embedding_response.rbi b/rbi/lib/openai/models/create_embedding_response.rbi index 6f823131..c095b791 100644 --- a/rbi/lib/openai/models/create_embedding_response.rbi +++ b/rbi/lib/openai/models/create_embedding_response.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class CreateEmbeddingResponse < OpenAI::BaseModel + # The list of embeddings generated by the model. sig { returns(T::Array[OpenAI::Models::Embedding]) } def data end @@ -11,6 +12,7 @@ module OpenAI def data=(_) end + # The name of the model used to generate the embedding. 
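The `CompoundFilter` above then nests those comparison filters under a boolean operator. A hedged sketch using plain hashes:

compound = {
  type: :and, # or :or, per the Type enum above
  filters: [
    {key: "region", type: :eq,  value: "us-east"},
    {key: "score",  type: :gte, value: 0.8}
  ]
}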
sig { returns(String) } def model end @@ -19,6 +21,7 @@ module OpenAI def model=(_) end + # The object type, which is always "list". sig { returns(Symbol) } def object end @@ -27,6 +30,7 @@ module OpenAI def object=(_) end + # The usage information for the request. sig { returns(OpenAI::Models::CreateEmbeddingResponse::Usage) } def usage end @@ -65,6 +69,7 @@ module OpenAI end class Usage < OpenAI::BaseModel + # The number of tokens used by the prompt. sig { returns(Integer) } def prompt_tokens end @@ -73,6 +78,7 @@ module OpenAI def prompt_tokens=(_) end + # The total number of tokens used by the request. sig { returns(Integer) } def total_tokens end @@ -81,6 +87,7 @@ module OpenAI def total_tokens=(_) end + # The usage information for the request. sig { params(prompt_tokens: Integer, total_tokens: Integer).returns(T.attached_class) } def self.new(prompt_tokens:, total_tokens:) end diff --git a/rbi/lib/openai/models/embedding.rbi b/rbi/lib/openai/models/embedding.rbi index 11cc9072..4218f3b9 100644 --- a/rbi/lib/openai/models/embedding.rbi +++ b/rbi/lib/openai/models/embedding.rbi @@ -3,6 +3,9 @@ module OpenAI module Models class Embedding < OpenAI::BaseModel + # The embedding vector, which is a list of floats. The length of the vector depends on + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). sig { returns(T::Array[Float]) } def embedding end @@ -11,6 +14,7 @@ module OpenAI def embedding=(_) end + # The index of the embedding in the list of embeddings. sig { returns(Integer) } def index end @@ -19,6 +23,7 @@ module OpenAI def index=(_) end + # The object type, which is always "embedding". sig { returns(Symbol) } def object end @@ -27,6 +32,7 @@ module OpenAI def object=(_) end + # Represents an embedding vector returned by the embedding endpoint. sig { params(embedding: T::Array[Float], index: Integer, object: Symbol).returns(T.attached_class) } def self.new(embedding:, index:, object: :embedding) end diff --git a/rbi/lib/openai/models/embedding_create_params.rbi b/rbi/lib/openai/models/embedding_create_params.rbi index 2a08c856..a9a37c56 100644 --- a/rbi/lib/openai/models/embedding_create_params.rbi +++ b/rbi/lib/openai/models/embedding_create_params.rbi @@ -6,6 +6,14 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on the total number of + # tokens summed across inputs. sig { returns(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) } def input end @@ -17,6 +25,11 @@ module OpenAI def input=(_) end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, Symbol)) } def model end @@ -25,6 +38,8 @@ module OpenAI def model=(_) end + # The number of dimensions the resulting output embeddings should have.
Only + # supported in `text-embedding-3` and later models. sig { returns(T.nilable(Integer)) } def dimensions end @@ -33,6 +48,8 @@ module OpenAI def dimensions=(_) end + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). sig { returns(T.nilable(Symbol)) } def encoding_format end @@ -41,6 +58,9 @@ module OpenAI def encoding_format=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -79,53 +99,48 @@ module OpenAI def to_hash end + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. class Input < OpenAI::Union abstract! + Variants = type_template(:out) do + {fixed: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])} + end + StringArray = T.type_alias { T::Array[String] } IntegerArray = T.type_alias { T::Array[Integer] } ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [NilClass, T::Array[String]], - [NilClass, T::Array[Integer]], - [NilClass, T::Array[T::Array[Integer]]] - ] - ) - end - private def variants - end - end end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). class EncodingFormat < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + FLOAT = :float BASE64 = :base64 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/embedding_model.rbi b/rbi/lib/openai/models/embedding_model.rbi index 161fb296..1bd2eac2 100644 --- a/rbi/lib/openai/models/embedding_model.rbi +++ b/rbi/lib/openai/models/embedding_model.rbi @@ -5,15 +5,11 @@ module OpenAI class EmbeddingModel < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002" TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small" TEXT_EMBEDDING_3_LARGE = :"text-embedding-3-large" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/file_chunking_strategy.rbi b/rbi/lib/openai/models/file_chunking_strategy.rbi index b0b4a1b0..93972f9b 100644 --- a/rbi/lib/openai/models/file_chunking_strategy.rbi +++ b/rbi/lib/openai/models/file_chunking_strategy.rbi @@ -2,18 +2,14 @@ module OpenAI module Models + # The strategy used to chunk the file. class FileChunkingStrategy < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::StaticFileChunkingStrategyObject], [Symbol, OpenAI::Models::OtherFileChunkingStrategyObject]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject) + } end end end diff --git a/rbi/lib/openai/models/file_chunking_strategy_param.rbi b/rbi/lib/openai/models/file_chunking_strategy_param.rbi index 9a360f39..aeffdf61 100644 --- a/rbi/lib/openai/models/file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/file_chunking_strategy_param.rbi @@ -2,18 +2,18 @@ module OpenAI module Models + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. class FileChunkingStrategyParam < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::AutoFileChunkingStrategyParam], [Symbol, OpenAI::Models::StaticFileChunkingStrategyObjectParam]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::AutoFileChunkingStrategyParam, + OpenAI::Models::StaticFileChunkingStrategyObjectParam + ) + } end end end diff --git a/rbi/lib/openai/models/file_create_params.rbi b/rbi/lib/openai/models/file_create_params.rbi index aa9afe8a..98619f88 100644 --- a/rbi/lib/openai/models/file_create_params.rbi +++ b/rbi/lib/openai/models/file_create_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The File object (not file name) to be uploaded. sig { returns(T.any(IO, StringIO)) } def file end @@ -14,6 +15,10 @@ module OpenAI def file=(_) end + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets sig { returns(Symbol) } def purpose end diff --git a/rbi/lib/openai/models/file_list_params.rbi b/rbi/lib/openai/models/file_list_params.rbi index 2da43a92..48b8106e 100644 --- a/rbi/lib/openai/models/file_list_params.rbi +++ b/rbi/lib/openai/models/file_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. 
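A hedged sketch of the upload that `FileCreateParams` above describes; note it takes an IO object rather than a file name (the path is a placeholder):

training_file = client.files.create(
  file: File.open("data/train.jsonl"), # the File object itself, not its name
  purpose: :"fine-tune"                # a Symbol, one of the documented purposes
)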
sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,8 @@ module OpenAI def after=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. sig { returns(T.nilable(Integer)) } def limit end @@ -22,6 +28,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -30,6 +38,7 @@ module OpenAI def order=(_) end + # Only return files with the given purpose. sig { returns(T.nilable(String)) } def purpose end @@ -66,17 +75,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/file_object.rbi b/rbi/lib/openai/models/file_object.rbi index 91a4d778..b31df148 100644 --- a/rbi/lib/openai/models/file_object.rbi +++ b/rbi/lib/openai/models/file_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class FileObject < OpenAI::BaseModel + # The file identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The size of the file, in bytes. sig { returns(Integer) } def bytes end @@ -19,6 +21,7 @@ module OpenAI def bytes=(_) end + # The Unix timestamp (in seconds) for when the file was created. sig { returns(Integer) } def created_at end @@ -27,6 +30,7 @@ module OpenAI def created_at=(_) end + # The name of the file. sig { returns(String) } def filename end @@ -35,6 +39,7 @@ module OpenAI def filename=(_) end + # The object type, which is always `file`. sig { returns(Symbol) } def object end @@ -43,6 +48,9 @@ module OpenAI def object=(_) end + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. sig { returns(Symbol) } def purpose end @@ -51,6 +59,8 @@ module OpenAI def purpose=(_) end + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. sig { returns(Symbol) } def status end @@ -59,6 +69,7 @@ module OpenAI def status=(_) end + # The Unix timestamp (in seconds) for when the file will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -67,6 +78,8 @@ module OpenAI def expires_at=(_) end + # Deprecated. For details on why a fine-tuning training file failed validation, + # see the `error` field on `fine_tuning.job`. sig { returns(T.nilable(String)) } def status_details end @@ -75,6 +88,7 @@ module OpenAI def status_details=(_) end + # The `File` object represents a document that has been uploaded to OpenAI. sig do params( id: String, @@ -121,9 +135,14 @@ module OpenAI def to_hash end + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. class Purpose < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + ASSISTANTS = :assistants ASSISTANTS_OUTPUT = :assistants_output BATCH = :batch @@ -131,26 +150,18 @@ module OpenAI FINE_TUNE = :"fine-tune" FINE_TUNE_RESULTS = :"fine-tune-results" VISION = :vision - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + UPLOADED = :uploaded PROCESSED = :processed ERROR = :error - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/file_purpose.rbi b/rbi/lib/openai/models/file_purpose.rbi index a30abf94..07bd11c5 100644 --- a/rbi/lib/openai/models/file_purpose.rbi +++ b/rbi/lib/openai/models/file_purpose.rbi @@ -2,21 +2,21 @@ module OpenAI module Models + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets class FilePurpose < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASSISTANTS = :assistants BATCH = :batch FINE_TUNE = :"fine-tune" VISION = :vision USER_DATA = :user_data EVALS = :evals - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi index ece16391..2de2b251 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJob < OpenAI::BaseModel + # The object identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,8 @@ module OpenAI def created_at=(_) end + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) } def error end @@ -33,6 +37,8 @@ module OpenAI def error=(_) end + # The name of the fine-tuned model that is being created. The value will be null + # if the fine-tuning job is still running. sig { returns(T.nilable(String)) } def fine_tuned_model end @@ -41,6 +47,8 @@ module OpenAI def fine_tuned_model=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + # value will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } def finished_at end @@ -49,6 +57,8 @@ module OpenAI def finished_at=(_) end + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. sig { returns(OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) } def hyperparameters end @@ -60,6 +70,7 @@ module OpenAI def hyperparameters=(_) end + # The base model that is being fine-tuned. sig { returns(String) } def model end @@ -68,6 +79,7 @@ module OpenAI def model=(_) end + # The object type, which is always "fine_tuning.job". 
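And the cursor-based listing that `FileListParams` describes, again as a hedged sketch (the cursor ID is a made-up placeholder):

page = client.files.list(
  after: "file-abc123",  # object ID marking your place in the list
  limit: 100,            # 1 to 10,000; defaults to 10,000
  order: :desc,          # sorted by created_at
  purpose: "assistants"  # only return files with this purpose
)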
sig { returns(Symbol) } def object end @@ -76,6 +88,7 @@ module OpenAI def object=(_) end + # The organization that owns the fine-tuning job. sig { returns(String) } def organization_id end @@ -84,6 +97,9 @@ module OpenAI def organization_id=(_) end + # The compiled results file ID(s) for the fine-tuning job. You can retrieve the + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T::Array[String]) } def result_files end @@ -92,6 +108,7 @@ module OpenAI def result_files=(_) end + # The seed used for the fine-tuning job. sig { returns(Integer) } def seed end @@ -100,6 +117,8 @@ module OpenAI def seed=(_) end + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. sig { returns(Symbol) } def status end @@ -108,6 +127,8 @@ module OpenAI def status=(_) end + # The total number of billable tokens processed by this fine-tuning job. The value + # will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } def trained_tokens end @@ -116,6 +137,8 @@ module OpenAI def trained_tokens=(_) end + # The file ID used for training. You can retrieve the training data with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(String) } def training_file end @@ -124,6 +147,9 @@ module OpenAI def training_file=(_) end + # The file ID used for validation. You can retrieve the validation results with + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T.nilable(String)) } def validation_file end @@ -132,6 +158,8 @@ module OpenAI def validation_file=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + # finish. The value will be null if the fine-tuning job is not running. sig { returns(T.nilable(Integer)) } def estimated_finish end @@ -140,6 +168,7 @@ module OpenAI def estimated_finish=(_) end + # A list of integrations to enable for this fine-tuning job. sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject])) } def integrations end @@ -151,6 +180,12 @@ module OpenAI def integrations=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -159,6 +194,7 @@ module OpenAI def metadata=(_) end + # The method used for fine-tuning. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method)) } def method_ end @@ -170,6 +206,8 @@ module OpenAI def method_=(_) end + # The `fine_tuning.job` object represents a fine-tuning job that has been created + # through the API. sig do params( id: String, @@ -247,6 +285,7 @@ module OpenAI end class Error < OpenAI::BaseModel + # A machine-readable error code. sig { returns(String) } def code end @@ -255,6 +294,7 @@ module OpenAI def code=(_) end + # A human-readable error message. sig { returns(String) } def message end @@ -263,6 +303,8 @@ module OpenAI def message=(_) end + # The parameter that was invalid, usually `training_file` or `validation_file`. 
+ # This field will be null if the failure was not parameter-specific. sig { returns(T.nilable(String)) } def param end @@ -271,6 +313,8 @@ module OpenAI def param=(_) end + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. sig { params(code: String, message: String, param: T.nilable(String)).returns(T.attached_class) } def self.new(code:, message:, param:) end @@ -281,6 +325,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -289,6 +335,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -297,6 +345,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -305,6 +355,8 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. sig do params( batch_size: T.any(Symbol, Integer), @@ -329,55 +381,48 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + VALIDATING_FILES = :validating_files QUEUED = :queued RUNNING = :running SUCCEEDED = :succeeded FAILED = :failed CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Method < OpenAI::BaseModel + # Configuration for the DPO fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo)) } def dpo end @@ -389,6 +434,7 @@ module OpenAI def dpo=(_) end + # Configuration for the supervised fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised)) } def supervised end @@ -400,6 +446,7 @@ module OpenAI def supervised=(_) end + # The type of method. 
Is either `supervised` or `dpo`. sig { returns(T.nilable(Symbol)) } def type end @@ -408,6 +455,7 @@ module OpenAI def type=(_) end + # The method used for fine-tuning. sig do params( dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, @@ -433,6 +481,7 @@ module OpenAI end class Dpo < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters)) } def hyperparameters end @@ -444,6 +493,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the DPO fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters) .returns(T.attached_class) @@ -459,6 +509,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -467,6 +519,8 @@ module OpenAI def batch_size=(_) end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } def beta end @@ -475,6 +529,8 @@ module OpenAI def beta=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -483,6 +539,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -491,6 +549,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -517,49 +576,42 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. class Beta < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end end class Supervised < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. 
sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters)) } def hyperparameters end @@ -571,6 +623,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the supervised fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters) .returns(T.attached_class) @@ -586,6 +639,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -594,6 +649,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -602,6 +659,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -610,6 +669,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -634,49 +694,40 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end end + # The type of method. Is either `supervised` or `dpo`. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + SUPERVISED = :supervised DPO = :dpo - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi index 15ce95c4..b1e399d7 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobEvent < OpenAI::BaseModel + # The object identifier. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,7 @@ module OpenAI def created_at=(_) end + # The log level of the event. 
sig { returns(Symbol) } def level end @@ -30,6 +33,7 @@ module OpenAI def level=(_) end + # The message of the event. sig { returns(String) } def message end @@ -38,6 +42,7 @@ module OpenAI def message=(_) end + # The object type, which is always "fine_tuning.job.event". sig { returns(Symbol) } def object end @@ -46,6 +51,7 @@ module OpenAI def object=(_) end + # The data associated with the event. sig { returns(T.nilable(T.anything)) } def data end @@ -54,6 +60,7 @@ module OpenAI def data=(_) end + # The type of event. sig { returns(T.nilable(Symbol)) } def type end @@ -62,6 +69,7 @@ module OpenAI def type=(_) end + # Fine-tuning job event object sig do params( id: String, @@ -94,31 +102,25 @@ module OpenAI def to_hash end + # The log level of the event. class Level < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + INFO = :info WARN = :warn ERROR = :error - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The type of event. class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MESSAGE = :message METRICS = :metrics - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi index 37b15696..f3899e11 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobWandbIntegration < OpenAI::BaseModel + # The name of the project that the new run will be created under. sig { returns(String) } def project end @@ -14,6 +15,9 @@ module OpenAI def project=(_) end + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } def entity end @@ -22,6 +26,8 @@ module OpenAI def entity=(_) end + # A display name to set for the run. If not set, we will use the Job ID as the + # name. sig { returns(T.nilable(String)) } def name end @@ -30,6 +36,9 @@ module OpenAI def name=(_) end + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } def tags end @@ -38,6 +47,10 @@ module OpenAI def tags=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. 
sig do params(project: String, entity: T.nilable(String), name: T.nilable(String), tags: T::Array[String]) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi index 148ab9c5..f6af3f86 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel + # The type of the integration being enabled for the fine-tuning job sig { returns(Symbol) } def type end @@ -14,6 +15,10 @@ module OpenAI def type=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig { returns(OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) } def wandb end diff --git a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi index 269a125d..224616d6 100644 --- a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). sig { returns(T.any(String, Symbol)) } def model end @@ -15,6 +17,23 @@ module OpenAI def model=(_) end + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. sig { returns(String) } def training_file end @@ -23,6 +42,8 @@ module OpenAI def training_file=(_) end + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters)) } def hyperparameters end @@ -34,6 +55,7 @@ module OpenAI def hyperparameters=(_) end + # A list of integrations to enable for your fine-tuning job. sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration])) } def integrations end @@ -45,6 +67,12 @@ module OpenAI def integrations=(_) end + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -53,6 +81,7 @@ module OpenAI def metadata=(_) end + # The method used for fine-tuning. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method)) } def method_ end @@ -64,6 +93,9 @@ module OpenAI def method_=(_) end + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. sig { returns(T.nilable(Integer)) } def seed end @@ -72,6 +104,11 @@ module OpenAI def seed=(_) end + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. sig { returns(T.nilable(String)) } def suffix end @@ -80,6 +117,18 @@ module OpenAI def suffix=(_) end + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. sig { returns(T.nilable(String)) } def validation_file end @@ -137,32 +186,17 @@ module OpenAI def to_hash end + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). class Model < OpenAI::Union abstract! - class Preset < OpenAI::Enum - abstract! - - BABBAGE_002 = :"babbage-002" - DAVINCI_002 = :"davinci-002" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_4O_MINI = :"gpt-4o-mini" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -171,6 +205,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -179,6 +215,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -187,6 +225,8 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. 
This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. sig do params( batch_size: T.any(Symbol, Integer), @@ -211,38 +251,34 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end class Integration < OpenAI::BaseModel + # The type of integration to enable. Currently, only "wandb" (Weights and Biases) + # is supported. sig { returns(Symbol) } def type end @@ -251,6 +287,10 @@ module OpenAI def type=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig { returns(OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) } def wandb end @@ -274,6 +314,7 @@ module OpenAI end class Wandb < OpenAI::BaseModel + # The name of the project that the new run will be created under. sig { returns(String) } def project end @@ -282,6 +323,9 @@ module OpenAI def project=(_) end + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } def entity end @@ -290,6 +334,8 @@ module OpenAI def entity=(_) end + # A display name to set for the run. If not set, we will use the Job ID as the + # name. sig { returns(T.nilable(String)) } def name end @@ -298,6 +344,9 @@ module OpenAI def name=(_) end + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } def tags end @@ -306,6 +355,10 @@ module OpenAI def tags=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig do params( project: String, @@ -333,6 +386,7 @@ module OpenAI end class Method < OpenAI::BaseModel + # Configuration for the DPO fine-tuning method. 
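
To make the create params above concrete, here is a minimal sketch of a job-create call. It assumes the conventional `OpenAI::Client` entry point and a `fine_tuning.jobs.create` resource method; the model and file IDs are placeholders, and the keyword is written `method_:` to match the `method_` accessor in the rbi (renamed so it cannot shadow Ruby's built-in `method`).

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Hyperparameters ride along under `method`, per the deprecation note above.
    job = client.fine_tuning.jobs.create(
      model: "gpt-4o-mini",          # one of the supported fine-tuning base models
      training_file: "file-abc123",  # placeholder: a JSONL upload with purpose `fine-tune`
      method_: {
        type: :supervised,
        supervised: {hyperparameters: {n_epochs: 3, batch_size: :auto}}
      },
      seed: 42                       # optional; pinned for reproducibility
    )
    puts job.id
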
sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo)) } def dpo end @@ -344,6 +398,7 @@ module OpenAI def dpo=(_) end + # Configuration for the supervised fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised)) } def supervised end @@ -355,6 +410,7 @@ module OpenAI def supervised=(_) end + # The type of method. Is either `supervised` or `dpo`. sig { returns(T.nilable(Symbol)) } def type end @@ -363,6 +419,7 @@ module OpenAI def type=(_) end + # The method used for fine-tuning. sig do params( dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, @@ -388,6 +445,7 @@ module OpenAI end class Dpo < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters)) } def hyperparameters end @@ -399,6 +457,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the DPO fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters) .returns(T.attached_class) @@ -414,6 +473,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -422,6 +483,8 @@ module OpenAI def batch_size=(_) end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } def beta end @@ -430,6 +493,8 @@ module OpenAI def beta=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -438,6 +503,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -446,6 +513,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -472,49 +540,42 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. class Beta < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. 
An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end end class Supervised < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters)) } def hyperparameters end @@ -526,6 +587,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the supervised fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters) .returns(T.attached_class) @@ -543,6 +605,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -551,6 +615,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -559,6 +625,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -567,6 +635,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -591,49 +660,40 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Float)} } end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(Symbol, Integer)} } end end end + # The type of method. Is either `supervised` or `dpo`. class Type < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + SUPERVISED = :supervised DPO = :dpo - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi index ba90b85e..fc4ede17 100644 --- a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last event from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of events to retrieve. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi index e217f2fe..6e667d46 100644 --- a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last job from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of fine-tuning jobs to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -23,6 +25,8 @@ module OpenAI def limit=(_) end + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. sig { returns(T.nilable(T::Hash[Symbol, String])) } def metadata end diff --git a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi b/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi index a032fba4..27b1407f 100644 --- a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last checkpoint ID from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -16,6 +17,7 @@ module OpenAI def after=(_) end + # Number of checkpoints to retrieve. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi index 077add44..54fe9d93 100644 --- a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi +++ b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi @@ -5,6 +5,7 @@ module OpenAI module FineTuning module Jobs class FineTuningJobCheckpoint < OpenAI::BaseModel + # The checkpoint identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -13,6 +14,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the checkpoint was created. sig { returns(Integer) } def created_at end @@ -21,6 +23,7 @@ module OpenAI def created_at=(_) end + # The name of the fine-tuned checkpoint model that is created. sig { returns(String) } def fine_tuned_model_checkpoint end @@ -29,6 +32,7 @@ module OpenAI def fine_tuned_model_checkpoint=(_) end + # The name of the fine-tuning job that this checkpoint was created from. 
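
Continuing that sketch for the pagination params above: `after` is a cursor, so each page is requested relative to the last ID already seen. The `list_events` method name and the page's `data` accessor are assumptions about the resource surface; the IDs are placeholders.

    events = client.fine_tuning.jobs.list_events("ftjob-abc123", limit: 10)
    events.data.each { |event| puts "#{event.level}: #{event.message}" }

    # Resume from the last event of the previous page.
    client.fine_tuning.jobs.list_events("ftjob-abc123", after: "ftevent-xyz789", limit: 10)
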
sig { returns(String) } def fine_tuning_job_id end @@ -37,6 +41,7 @@ module OpenAI def fine_tuning_job_id=(_) end + # Metrics at the step number during the fine-tuning job. sig { returns(OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) } def metrics end @@ -48,6 +53,7 @@ module OpenAI def metrics=(_) end + # The object type, which is always "fine_tuning.job.checkpoint". sig { returns(Symbol) } def object end @@ -56,6 +62,7 @@ module OpenAI def object=(_) end + # The step number that the checkpoint was created at. sig { returns(Integer) } def step_number end @@ -64,6 +71,8 @@ module OpenAI def step_number=(_) end + # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a + # fine-tuning job that is ready to use. sig do params( id: String, @@ -161,6 +170,7 @@ module OpenAI def valid_mean_token_accuracy=(_) end + # Metrics at the step number during the fine-tuning job. sig do params( full_valid_loss: Float, diff --git a/rbi/lib/openai/models/function_definition.rbi b/rbi/lib/openai/models/function_definition.rbi index f4fe38c3..75a4000f 100644 --- a/rbi/lib/openai/models/function_definition.rbi +++ b/rbi/lib/openai/models/function_definition.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class FunctionDefinition < OpenAI::BaseModel + # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + # underscores and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -11,6 +13,8 @@ module OpenAI def name=(_) end + # A description of what the function does, used by the model to choose when and + # how to call the function. sig { returns(T.nilable(String)) } def description end @@ -19,6 +23,13 @@ module OpenAI def description=(_) end + # The parameters the functions accepts, described as a JSON Schema object. See the + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. + # + # Omitting `parameters` defines a function with an empty parameter list. sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } def parameters end @@ -27,6 +38,11 @@ module OpenAI def parameters=(_) end + # Whether to enable strict schema adherence when generating the function call. If + # set to true, the model will follow the exact schema defined in the `parameters` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + # more about Structured Outputs in the + # [function calling guide](docs/guides/function-calling). sig { returns(T.nilable(T::Boolean)) } def strict end diff --git a/rbi/lib/openai/models/image.rbi b/rbi/lib/openai/models/image.rbi index 81607a7a..3c0fc4fa 100644 --- a/rbi/lib/openai/models/image.rbi +++ b/rbi/lib/openai/models/image.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class Image < OpenAI::BaseModel + # The base64-encoded JSON of the generated image, if `response_format` is + # `b64_json`. sig { returns(T.nilable(String)) } def b64_json end @@ -11,6 +13,8 @@ module OpenAI def b64_json=(_) end + # The prompt that was used to generate the image, if there was any revision to the + # prompt. sig { returns(T.nilable(String)) } def revised_prompt end @@ -19,6 +23,7 @@ module OpenAI def revised_prompt=(_) end + # The URL of the generated image, if `response_format` is `url` (default). 
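
A hedged sketch of building a function definition from the fields documented above; the weather lookup and its JSON Schema are invented for illustration.

    require "openai"

    weather_fn = OpenAI::Models::FunctionDefinition.new(
      name: "get_weather",   # a-z, A-Z, 0-9, underscores and dashes; max length 64
      description: "Look up the current weather for a city",
      parameters: {
        type: "object",
        properties: {city: {type: "string"}},
        required: ["city"],
        additionalProperties: false
      },
      strict: true           # follow the exact schema (JSON Schema subset only)
    )
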
sig { returns(T.nilable(String)) } def url end @@ -27,6 +32,7 @@ module OpenAI def url=(_) end + # Represents the url or the content of an image generated by the OpenAI API. sig { params(b64_json: String, revised_prompt: String, url: String).returns(T.attached_class) } def self.new(b64_json: nil, revised_prompt: nil, url: nil) end diff --git a/rbi/lib/openai/models/image_create_variation_params.rbi b/rbi/lib/openai/models/image_create_variation_params.rbi index 1d40fb57..285e3c70 100644 --- a/rbi/lib/openai/models/image_create_variation_params.rbi +++ b/rbi/lib/openai/models/image_create_variation_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. sig { returns(T.any(IO, StringIO)) } def image end @@ -14,6 +16,8 @@ module OpenAI def image=(_) end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -22,6 +26,8 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. sig { returns(T.nilable(Integer)) } def n end @@ -30,6 +36,9 @@ module OpenAI def n=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -38,6 +47,8 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. sig { returns(T.nilable(Symbol)) } def size end @@ -46,6 +57,9 @@ module OpenAI def size=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -86,41 +100,36 @@ module OpenAI def to_hash end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + URL = :url + B64_JSON = :b64_json end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. class Size < OpenAI::Enum abstract! 
- NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + NUMBER_256X256 = :"256x256" + NUMBER_512X512 = :"512x512" + NUMBER_1024X1024 = :"1024x1024" end end end diff --git a/rbi/lib/openai/models/image_edit_params.rbi b/rbi/lib/openai/models/image_edit_params.rbi index fb3b78a8..b672e912 100644 --- a/rbi/lib/openai/models/image_edit_params.rbi +++ b/rbi/lib/openai/models/image_edit_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + # is not provided, image must have transparency, which will be used as the mask. sig { returns(T.any(IO, StringIO)) } def image end @@ -14,6 +16,8 @@ module OpenAI def image=(_) end + # A text description of the desired image(s). The maximum length is 1000 + # characters. sig { returns(String) } def prompt end @@ -22,6 +26,9 @@ module OpenAI def prompt=(_) end + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. sig { returns(T.nilable(T.any(IO, StringIO))) } def mask end @@ -30,6 +37,8 @@ module OpenAI def mask=(_) end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -38,6 +47,7 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. sig { returns(T.nilable(Integer)) } def n end @@ -46,6 +56,9 @@ module OpenAI def n=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -54,6 +67,8 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. sig { returns(T.nilable(Symbol)) } def size end @@ -62,6 +77,9 @@ module OpenAI def size=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -116,41 +134,36 @@ module OpenAI def to_hash end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + URL = :url + B64_JSON = :b64_json end + # The size of the generated images. 
Must be one of `256x256`, `512x512`, or + # `1024x1024`. class Size < OpenAI::Enum abstract! - NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + NUMBER_256X256 = :"256x256" + NUMBER_512X512 = :"512x512" + NUMBER_1024X1024 = :"1024x1024" end end end diff --git a/rbi/lib/openai/models/image_generate_params.rbi b/rbi/lib/openai/models/image_generate_params.rbi index c5e39887..7fccc54c 100644 --- a/rbi/lib/openai/models/image_generate_params.rbi +++ b/rbi/lib/openai/models/image_generate_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. sig { returns(String) } def prompt end @@ -14,6 +16,7 @@ module OpenAI def prompt=(_) end + # The model to use for image generation. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -22,6 +25,8 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. sig { returns(T.nilable(Integer)) } def n end @@ -30,6 +35,9 @@ module OpenAI def n=(_) end + # The quality of the image that will be generated. `hd` creates images with finer + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. sig { returns(T.nilable(Symbol)) } def quality end @@ -38,6 +46,9 @@ module OpenAI def quality=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -46,6 +57,9 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. sig { returns(T.nilable(Symbol)) } def size end @@ -54,6 +68,10 @@ module OpenAI def size=(_) end + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. sig { returns(T.nilable(Symbol)) } def style end @@ -62,6 +80,9 @@ module OpenAI def style=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -116,69 +137,63 @@ module OpenAI def to_hash end + # The model to use for image generation. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # The quality of the image that will be generated. `hd` creates images with finer + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. class Quality < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + STANDARD = :standard HD = :hd - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + URL = :url + B64_JSON = :b64_json end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. class Size < OpenAI::Enum abstract! - NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) - NUMBER_1792X1024 = T.let(:"1792x1024", T.nilable(Symbol)) - NUMBER_1024X1792 = T.let(:"1024x1792", T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + NUMBER_256X256 = :"256x256" + NUMBER_512X512 = :"512x512" + NUMBER_1024X1024 = :"1024x1024" + NUMBER_1792X1024 = :"1792x1024" + NUMBER_1024X1792 = :"1024x1792" end + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. class Style < OpenAI::Enum abstract! - VIVID = T.let(:vivid, T.nilable(Symbol)) - NATURAL = T.let(:natural, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + VIVID = :vivid + NATURAL = :natural end end end diff --git a/rbi/lib/openai/models/image_model.rbi b/rbi/lib/openai/models/image_model.rbi index 08c7dccd..6716a390 100644 --- a/rbi/lib/openai/models/image_model.rbi +++ b/rbi/lib/openai/models/image_model.rbi @@ -5,14 +5,10 @@ module OpenAI class ImageModel < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + DALL_E_2 = :"dall-e-2" DALL_E_3 = :"dall-e-3" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/model.rbi b/rbi/lib/openai/models/model.rbi index ad1c5f72..07b59908 100644 --- a/rbi/lib/openai/models/model.rbi +++ b/rbi/lib/openai/models/model.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Model < OpenAI::BaseModel + # The model identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) when the model was created. sig { returns(Integer) } def created end @@ -19,6 +21,7 @@ module OpenAI def created=(_) end + # The object type, which is always "model". sig { returns(Symbol) } def object end @@ -27,6 +30,7 @@ module OpenAI def object=(_) end + # The organization that owns the model. sig { returns(String) } def owned_by end @@ -35,6 +39,7 @@ module OpenAI def owned_by=(_) end + # Describes an OpenAI model offering that can be used with the API. 
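
Pulling the image params and enums above together, a sketch of a `dall-e-3` generation call. It reuses the `client` from the earlier fine-tuning sketch and assumes an `images.generate` resource method returning a response whose `data` is an array of the `Image` model documented above.

    image = client.images.generate(
      prompt: "a watercolor lighthouse at dusk",
      model: OpenAI::Models::ImageModel::DALL_E_3,
      n: 1,                   # dall-e-3 only supports n=1
      size: :"1024x1792",     # dall-e-3 sizes: 1024x1024, 1792x1024, 1024x1792
      quality: :hd,           # finer detail; dall-e-3 only
      style: :natural,        # less hyper-real than :vivid
      response_format: :url   # URLs expire 60 minutes after generation
    )
    puts image.data.first.url
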
sig { params(id: String, created: Integer, owned_by: String, object: Symbol).returns(T.attached_class) } def self.new(id:, created:, owned_by:, object: :model) end diff --git a/rbi/lib/openai/models/moderation.rbi b/rbi/lib/openai/models/moderation.rbi index aba76998..c430dd8a 100644 --- a/rbi/lib/openai/models/moderation.rbi +++ b/rbi/lib/openai/models/moderation.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Moderation < OpenAI::BaseModel + # A list of the categories, and whether they are flagged or not. sig { returns(OpenAI::Models::Moderation::Categories) } def categories end @@ -11,6 +12,7 @@ module OpenAI def categories=(_) end + # A list of the categories along with the input type(s) that the score applies to. sig { returns(OpenAI::Models::Moderation::CategoryAppliedInputTypes) } def category_applied_input_types end @@ -22,6 +24,7 @@ module OpenAI def category_applied_input_types=(_) end + # A list of the categories along with their scores as predicted by model. sig { returns(OpenAI::Models::Moderation::CategoryScores) } def category_scores end @@ -30,6 +33,7 @@ module OpenAI def category_scores=(_) end + # Whether any of the below categories are flagged. sig { returns(T::Boolean) } def flagged end @@ -65,6 +69,8 @@ module OpenAI end class Categories < OpenAI::BaseModel + # Content that expresses, incites, or promotes harassing language towards any + # target. sig { returns(T::Boolean) } def harassment end @@ -73,6 +79,8 @@ module OpenAI def harassment=(_) end + # Harassment content that also includes violence or serious harm towards any + # target. sig { returns(T::Boolean) } def harassment_threatening end @@ -81,6 +89,10 @@ module OpenAI def harassment_threatening=(_) end + # Content that expresses, incites, or promotes hate based on race, gender, + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. sig { returns(T::Boolean) } def hate end @@ -89,6 +101,9 @@ module OpenAI def hate=(_) end + # Hateful content that also includes violence or serious harm towards the targeted + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. sig { returns(T::Boolean) } def hate_threatening end @@ -97,6 +112,9 @@ module OpenAI def hate_threatening=(_) end + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. sig { returns(T.nilable(T::Boolean)) } def illicit end @@ -105,6 +123,9 @@ module OpenAI def illicit=(_) end + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. sig { returns(T.nilable(T::Boolean)) } def illicit_violent end @@ -113,6 +134,8 @@ module OpenAI def illicit_violent=(_) end + # Content that promotes, encourages, or depicts acts of self-harm, such as + # suicide, cutting, and eating disorders. sig { returns(T::Boolean) } def self_harm end @@ -121,6 +144,9 @@ module OpenAI def self_harm=(_) end + # Content that encourages performing acts of self-harm, such as suicide, cutting, + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. 
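
The boolean flags in this `Categories` model pair with a classification call. A sketch, assuming a `moderations.create` resource method and reusing the `client` from earlier; `results`, `flagged`, and `categories` all appear in the surrounding types.

    response = client.moderations.create(input: "how to shoplift")
    result = response.results.first

    puts result.flagged              # true if any category below fired
    puts result.categories.illicit   # per-category booleans, as documented here
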
sig { returns(T::Boolean) } def self_harm_instructions end @@ -129,6 +155,8 @@ module OpenAI def self_harm_instructions=(_) end + # Content where the speaker expresses that they are engaging or intend to engage + # in acts of self-harm, such as suicide, cutting, and eating disorders. sig { returns(T::Boolean) } def self_harm_intent end @@ -137,6 +165,9 @@ module OpenAI def self_harm_intent=(_) end + # Content meant to arouse sexual excitement, such as the description of sexual + # activity, or that promotes sexual services (excluding sex education and + # wellness). sig { returns(T::Boolean) } def sexual end @@ -145,6 +176,7 @@ module OpenAI def sexual=(_) end + # Sexual content that includes an individual who is under 18 years old. sig { returns(T::Boolean) } def sexual_minors end @@ -153,6 +185,7 @@ module OpenAI def sexual_minors=(_) end + # Content that depicts death, violence, or physical injury. sig { returns(T::Boolean) } def violence end @@ -161,6 +194,7 @@ module OpenAI def violence=(_) end + # Content that depicts death, violence, or physical injury in graphic detail. sig { returns(T::Boolean) } def violence_graphic end @@ -169,6 +203,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories, and whether they are flagged or not. sig do params( harassment: T::Boolean, @@ -229,6 +264,7 @@ module OpenAI end class CategoryAppliedInputTypes < OpenAI::BaseModel + # The applied input type(s) for the category 'harassment'. sig { returns(T::Array[Symbol]) } def harassment end @@ -237,6 +273,7 @@ module OpenAI def harassment=(_) end + # The applied input type(s) for the category 'harassment/threatening'. sig { returns(T::Array[Symbol]) } def harassment_threatening end @@ -245,6 +282,7 @@ module OpenAI def harassment_threatening=(_) end + # The applied input type(s) for the category 'hate'. sig { returns(T::Array[Symbol]) } def hate end @@ -253,6 +291,7 @@ module OpenAI def hate=(_) end + # The applied input type(s) for the category 'hate/threatening'. sig { returns(T::Array[Symbol]) } def hate_threatening end @@ -261,6 +300,7 @@ module OpenAI def hate_threatening=(_) end + # The applied input type(s) for the category 'illicit'. sig { returns(T::Array[Symbol]) } def illicit end @@ -269,6 +309,7 @@ module OpenAI def illicit=(_) end + # The applied input type(s) for the category 'illicit/violent'. sig { returns(T::Array[Symbol]) } def illicit_violent end @@ -277,6 +318,7 @@ module OpenAI def illicit_violent=(_) end + # The applied input type(s) for the category 'self-harm'. sig { returns(T::Array[Symbol]) } def self_harm end @@ -285,6 +327,7 @@ module OpenAI def self_harm=(_) end + # The applied input type(s) for the category 'self-harm/instructions'. sig { returns(T::Array[Symbol]) } def self_harm_instructions end @@ -293,6 +336,7 @@ module OpenAI def self_harm_instructions=(_) end + # The applied input type(s) for the category 'self-harm/intent'. sig { returns(T::Array[Symbol]) } def self_harm_intent end @@ -301,6 +345,7 @@ module OpenAI def self_harm_intent=(_) end + # The applied input type(s) for the category 'sexual'. sig { returns(T::Array[Symbol]) } def sexual end @@ -309,6 +354,7 @@ module OpenAI def sexual=(_) end + # The applied input type(s) for the category 'sexual/minors'. sig { returns(T::Array[Symbol]) } def sexual_minors end @@ -317,6 +363,7 @@ module OpenAI def sexual_minors=(_) end + # The applied input type(s) for the category 'violence'. 
sig { returns(T::Array[Symbol]) } def violence end @@ -325,6 +372,7 @@ module OpenAI def violence=(_) end + # The applied input type(s) for the category 'violence/graphic'. sig { returns(T::Array[Symbol]) } def violence_graphic end @@ -333,6 +381,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories along with the input type(s) that the score applies to. sig do params( harassment: T::Array[Symbol], @@ -394,167 +443,116 @@ module OpenAI class Harassment < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class HarassmentThreatening < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class Hate < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class HateThreatening < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class Illicit < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class IllicitViolent < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class SelfHarm < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class SelfHarmInstruction < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class SelfHarmIntent < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Sexual < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class SexualMinor < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end class Violence < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class ViolenceGraphic < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + TEXT = :text IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class CategoryScores < OpenAI::BaseModel + # The score for the category 'harassment'. sig { returns(Float) } def harassment end @@ -563,6 +561,7 @@ module OpenAI def harassment=(_) end + # The score for the category 'harassment/threatening'. 
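
Continuing the moderation sketch: alongside the boolean flags, `category_scores` carries the model's per-category confidence as floats, and `category_applied_input_types` records which modalities each verdict was based on.

    scores = result.category_scores
    puts format("illicit: %.4f", scores.illicit)

    # Which input modalities informed the decision (:text and/or :image).
    result.category_applied_input_types.self_harm.each { |input_type| puts input_type }
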
sig { returns(Float) } def harassment_threatening end @@ -571,6 +570,7 @@ module OpenAI def harassment_threatening=(_) end + # The score for the category 'hate'. sig { returns(Float) } def hate end @@ -579,6 +579,7 @@ module OpenAI def hate=(_) end + # The score for the category 'hate/threatening'. sig { returns(Float) } def hate_threatening end @@ -587,6 +588,7 @@ module OpenAI def hate_threatening=(_) end + # The score for the category 'illicit'. sig { returns(Float) } def illicit end @@ -595,6 +597,7 @@ module OpenAI def illicit=(_) end + # The score for the category 'illicit/violent'. sig { returns(Float) } def illicit_violent end @@ -603,6 +606,7 @@ module OpenAI def illicit_violent=(_) end + # The score for the category 'self-harm'. sig { returns(Float) } def self_harm end @@ -611,6 +615,7 @@ module OpenAI def self_harm=(_) end + # The score for the category 'self-harm/instructions'. sig { returns(Float) } def self_harm_instructions end @@ -619,6 +624,7 @@ module OpenAI def self_harm_instructions=(_) end + # The score for the category 'self-harm/intent'. sig { returns(Float) } def self_harm_intent end @@ -627,6 +633,7 @@ module OpenAI def self_harm_intent=(_) end + # The score for the category 'sexual'. sig { returns(Float) } def sexual end @@ -635,6 +642,7 @@ module OpenAI def sexual=(_) end + # The score for the category 'sexual/minors'. sig { returns(Float) } def sexual_minors end @@ -643,6 +651,7 @@ module OpenAI def sexual_minors=(_) end + # The score for the category 'violence'. sig { returns(Float) } def violence end @@ -651,6 +660,7 @@ module OpenAI def violence=(_) end + # The score for the category 'violence/graphic'. sig { returns(Float) } def violence_graphic end @@ -659,6 +669,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories along with their scores as predicted by model. sig do params( harassment: Float, diff --git a/rbi/lib/openai/models/moderation_create_params.rbi b/rbi/lib/openai/models/moderation_create_params.rbi index 998863d3..e00a5df9 100644 --- a/rbi/lib/openai/models/moderation_create_params.rbi +++ b/rbi/lib/openai/models/moderation_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. sig do returns( T.any( @@ -37,6 +39,10 @@ module OpenAI def input=(_) end + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -77,33 +83,34 @@ module OpenAI def to_hash end + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. class Input < OpenAI::Union abstract! 
+ Variants = type_template(:out) do + { + fixed: T.any( + String, + T::Array[String], + T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] + ) + } + end + StringArray = T.type_alias { T::Array[String] } ModerationMultiModalInputArray = T.type_alias { T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] } - - class << self - sig do - override - .returns( - [[NilClass, String], [NilClass, T::Array[String]], [NilClass, T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)]]] - ) - end - private def variants - end - end end + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end end end diff --git a/rbi/lib/openai/models/moderation_create_response.rbi b/rbi/lib/openai/models/moderation_create_response.rbi index 46b29878..bf831d82 100644 --- a/rbi/lib/openai/models/moderation_create_response.rbi +++ b/rbi/lib/openai/models/moderation_create_response.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationCreateResponse < OpenAI::BaseModel + # The unique identifier for the moderation request. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The model used to generate the moderation results. sig { returns(String) } def model end @@ -19,6 +21,7 @@ module OpenAI def model=(_) end + # A list of moderation objects. sig { returns(T::Array[OpenAI::Models::Moderation]) } def results end @@ -27,6 +30,7 @@ module OpenAI def results=(_) end + # Represents if a given text input is potentially harmful. sig { params(id: String, model: String, results: T::Array[OpenAI::Models::Moderation]).returns(T.attached_class) } def self.new(id:, model:, results:) end diff --git a/rbi/lib/openai/models/moderation_image_url_input.rbi b/rbi/lib/openai/models/moderation_image_url_input.rbi index a7ecaefe..222a1447 100644 --- a/rbi/lib/openai/models/moderation_image_url_input.rbi +++ b/rbi/lib/openai/models/moderation_image_url_input.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationImageURLInput < OpenAI::BaseModel + # Contains either an image URL or a data URL for a base64 encoded image. sig { returns(OpenAI::Models::ModerationImageURLInput::ImageURL) } def image_url end @@ -14,6 +15,7 @@ module OpenAI def image_url=(_) end + # Always `image_url`. sig { returns(Symbol) } def type end @@ -22,6 +24,7 @@ module OpenAI def type=(_) end + # An object describing an image to classify. sig do params(image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, type: Symbol) .returns(T.attached_class) @@ -34,6 +37,7 @@ module OpenAI end class ImageURL < OpenAI::BaseModel + # Either a URL of the image or the base64 encoded image data. sig { returns(String) } def url end @@ -42,6 +46,7 @@ module OpenAI def url=(_) end + # Contains either an image URL or a data URL for a base64 encoded image. 
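
The `Input` union above admits a plain string, an array of strings, or mixed image/text objects. A sketch of the multi-modal case, assuming hashes are coerced into the typed inputs (otherwise construct `ModerationTextInput` and `ModerationImageURLInput` directly); the image URL is a placeholder.

    client.moderations.create(
      model: "omni-moderation-latest",
      input: [
        {type: :text, text: "caption to check"},
        {type: :image_url, image_url: {url: "https://example.com/photo.png"}}
      ]
    )
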
sig { params(url: String).returns(T.attached_class) } def self.new(url:) end diff --git a/rbi/lib/openai/models/moderation_model.rbi b/rbi/lib/openai/models/moderation_model.rbi index e3a00bac..a34a1f36 100644 --- a/rbi/lib/openai/models/moderation_model.rbi +++ b/rbi/lib/openai/models/moderation_model.rbi @@ -5,16 +5,12 @@ module OpenAI class ModerationModel < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + OMNI_MODERATION_LATEST = :"omni-moderation-latest" OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26" TEXT_MODERATION_LATEST = :"text-moderation-latest" TEXT_MODERATION_STABLE = :"text-moderation-stable" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/moderation_multi_modal_input.rbi b/rbi/lib/openai/models/moderation_multi_modal_input.rbi index 2d658e57..a24328fc 100644 --- a/rbi/lib/openai/models/moderation_multi_modal_input.rbi +++ b/rbi/lib/openai/models/moderation_multi_modal_input.rbi @@ -2,19 +2,11 @@ module OpenAI module Models + # An object describing an image to classify. class ModerationMultiModalInput < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::ModerationImageURLInput], [Symbol, OpenAI::Models::ModerationTextInput]] - ) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)} } end end end diff --git a/rbi/lib/openai/models/moderation_text_input.rbi b/rbi/lib/openai/models/moderation_text_input.rbi index 85c34f7f..41888533 100644 --- a/rbi/lib/openai/models/moderation_text_input.rbi +++ b/rbi/lib/openai/models/moderation_text_input.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationTextInput < OpenAI::BaseModel + # A string of text to classify. sig { returns(String) } def text end @@ -11,6 +12,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -19,6 +21,7 @@ module OpenAI def type=(_) end + # An object describing text to classify. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi index c4d89d20..db3ddb71 100644 --- a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi +++ b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class OtherFileChunkingStrategyObject < OpenAI::BaseModel + # Always `other`. sig { returns(Symbol) } def type end @@ -11,6 +12,9 @@ module OpenAI def type=(_) end + # This is returned when the chunking strategy is unknown. Typically, this is + # because the file was indexed before the `chunking_strategy` concept was + # introduced in the API. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :other) end diff --git a/rbi/lib/openai/models/reasoning.rbi b/rbi/lib/openai/models/reasoning.rbi index de57d2db..e6f69e82 100644 --- a/rbi/lib/openai/models/reasoning.rbi +++ b/rbi/lib/openai/models/reasoning.rbi @@ -3,6 +3,12 @@ module OpenAI module Models class Reasoning < OpenAI::BaseModel + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def effort end @@ -11,6 +17,11 @@ module OpenAI def effort=(_) end + # **o-series models only** + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. sig { returns(T.nilable(Symbol)) } def generate_summary end @@ -19,6 +30,10 @@ module OpenAI def generate_summary=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { params(effort: T.nilable(Symbol), generate_summary: T.nilable(Symbol)).returns(T.attached_class) } def self.new(effort:, generate_summary: nil) end @@ -27,17 +42,18 @@ module OpenAI def to_hash end + # **o-series models only** + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. class GenerateSummary < OpenAI::Enum abstract! - CONCISE = T.let(:concise, T.nilable(Symbol)) - DETAILED = T.let(:detailed, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + CONCISE = :concise + DETAILED = :detailed end end end diff --git a/rbi/lib/openai/models/reasoning_effort.rbi b/rbi/lib/openai/models/reasoning_effort.rbi index b4182a8d..dcca18c9 100644 --- a/rbi/lib/openai/models/reasoning_effort.rbi +++ b/rbi/lib/openai/models/reasoning_effort.rbi @@ -2,18 +2,20 @@ module OpenAI module Models + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. class ReasoningEffort < OpenAI::Enum abstract! - LOW = T.let(:low, T.nilable(Symbol)) - MEDIUM = T.let(:medium, T.nilable(Symbol)) - HIGH = T.let(:high, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + LOW = :low + MEDIUM = :medium + HIGH = :high end end end diff --git a/rbi/lib/openai/models/response_format_json_object.rbi b/rbi/lib/openai/models/response_format_json_object.rbi index ffd5658c..044c6ff6 100644 --- a/rbi/lib/openai/models/response_format_json_object.rbi +++ b/rbi/lib/openai/models/response_format_json_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatJSONObject < OpenAI::BaseModel + # The type of response format being defined. Always `json_object`. sig { returns(Symbol) } def type end @@ -11,6 +12,9 @@ module OpenAI def type=(_) end + # JSON object response format. An older method of generating JSON responses. Using + # `json_schema` is recommended for models that support it. Note that the model + # will not generate JSON without a system or user message instructing it to do so. 
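
Constructing the reasoning options is direct given the `Reasoning` sig shown above: `effort:` is a required (though nilable) keyword and `generate_summary:` defaults to `nil`.

    require "openai"

    reasoning = OpenAI::Models::Reasoning.new(
      effort: :medium,            # :low, :medium, or :high; lower is faster and cheaper
      generate_summary: :concise  # optional; :concise or :detailed
    )
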
sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :json_object) end diff --git a/rbi/lib/openai/models/response_format_json_schema.rbi b/rbi/lib/openai/models/response_format_json_schema.rbi index b622a63b..de32d2a7 100644 --- a/rbi/lib/openai/models/response_format_json_schema.rbi +++ b/rbi/lib/openai/models/response_format_json_schema.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatJSONSchema < OpenAI::BaseModel + # Structured Outputs configuration options, including a JSON Schema. sig { returns(OpenAI::Models::ResponseFormatJSONSchema::JSONSchema) } def json_schema end @@ -14,6 +15,7 @@ module OpenAI def json_schema=(_) end + # The type of response format being defined. Always `json_schema`. sig { returns(Symbol) } def type end @@ -22,6 +24,9 @@ module OpenAI def type=(_) end + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params(json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, type: Symbol) .returns(T.attached_class) @@ -34,6 +39,8 @@ module OpenAI end class JSONSchema < OpenAI::BaseModel + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -42,6 +49,8 @@ module OpenAI def name=(_) end + # A description of what the response format is for, used by the model to determine + # how to respond in the format. sig { returns(T.nilable(String)) } def description end @@ -50,6 +59,8 @@ module OpenAI def description=(_) end + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } def schema end @@ -58,6 +69,11 @@ module OpenAI def schema=(_) end + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). sig { returns(T.nilable(T::Boolean)) } def strict end @@ -66,6 +82,7 @@ module OpenAI def strict=(_) end + # Structured Outputs configuration options, including a JSON Schema. sig do params( name: String, diff --git a/rbi/lib/openai/models/response_format_text.rbi b/rbi/lib/openai/models/response_format_text.rbi index 6f3c8970..2894efdf 100644 --- a/rbi/lib/openai/models/response_format_text.rbi +++ b/rbi/lib/openai/models/response_format_text.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatText < OpenAI::BaseModel + # The type of response format being defined. Always `text`. sig { returns(Symbol) } def type end @@ -11,6 +12,7 @@ module OpenAI def type=(_) end + # Default response format. Used to generate text responses. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :text) end diff --git a/rbi/lib/openai/models/responses/computer_tool.rbi b/rbi/lib/openai/models/responses/computer_tool.rbi index b6ba2c12..40b9918f 100644 --- a/rbi/lib/openai/models/responses/computer_tool.rbi +++ b/rbi/lib/openai/models/responses/computer_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ComputerTool < OpenAI::BaseModel + # The height of the computer display. 
sig { returns(Float) } def display_height end @@ -12,6 +13,7 @@ module OpenAI def display_height=(_) end + # The width of the computer display. sig { returns(Float) } def display_width end @@ -20,6 +22,7 @@ module OpenAI def display_width=(_) end + # The type of computer environment to control. sig { returns(Symbol) } def environment end @@ -28,6 +31,7 @@ module OpenAI def environment=(_) end + # The type of the computer use tool. Always `computer_use_preview`. sig { returns(Symbol) } def type end @@ -36,6 +40,8 @@ module OpenAI def type=(_) end + # A tool that controls a virtual computer. Learn more about the + # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). sig do params(display_height: Float, display_width: Float, environment: Symbol, type: Symbol) .returns(T.attached_class) @@ -49,19 +55,16 @@ module OpenAI def to_hash end + # The type of computer environment to control. class Environment < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MAC = :mac WINDOWS = :windows UBUNTU = :ubuntu BROWSER = :browser - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/easy_input_message.rbi b/rbi/lib/openai/models/responses/easy_input_message.rbi index 428515ec..990b3527 100644 --- a/rbi/lib/openai/models/responses/easy_input_message.rbi +++ b/rbi/lib/openai/models/responses/easy_input_message.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class EasyInputMessage < OpenAI::BaseModel + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)) } def content end @@ -15,6 +17,8 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. sig { returns(Symbol) } def role end @@ -23,6 +27,7 @@ module OpenAI def role=(_) end + # The type of the message input. Always `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -31,6 +36,11 @@ module OpenAI def type=(_) end + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. sig do params( content: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList), @@ -51,44 +61,34 @@ module OpenAI def to_hash end + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. class Content < OpenAI::Union abstract! - class << self - sig do - override - .returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInputMessageContentList]]) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)} } end + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user ASSISTANT = :assistant SYSTEM = :system DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The type of the message input. Always `message`. 
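A small sketch of the instruction hierarchy documented above, constructing inputs directly from these models (the keyword arguments are read off the `self.new` signatures in this diff):

```ruby
# `developer`/`system` instructions take precedence over `user` content,
# and `assistant` messages represent prior model turns.
input = [
  OpenAI::Models::Responses::EasyInputMessage.new(
    role: :developer,
    content: "Answer in one short sentence."
  ),
  OpenAI::Models::Responses::EasyInputMessage.new(
    role: :user,
    content: "Why is the sky blue?"
  )
]
```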
class Type < OpenAI::Enum abstract! - MESSAGE = :message + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + MESSAGE = :message end end end diff --git a/rbi/lib/openai/models/responses/file_search_tool.rbi b/rbi/lib/openai/models/responses/file_search_tool.rbi index 204c6f9d..3a6bd9db 100644 --- a/rbi/lib/openai/models/responses/file_search_tool.rbi +++ b/rbi/lib/openai/models/responses/file_search_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class FileSearchTool < OpenAI::BaseModel + # The type of the file search tool. Always `file_search`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # The IDs of the vector stores to search. sig { returns(T::Array[String]) } def vector_store_ids end @@ -20,6 +22,7 @@ module OpenAI def vector_store_ids=(_) end + # A filter to apply based on file attributes. sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } def filters end @@ -31,6 +34,8 @@ module OpenAI def filters=(_) end + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -39,6 +44,7 @@ module OpenAI def max_num_results=(_) end + # Ranking options for search. sig { returns(T.nilable(OpenAI::Models::Responses::FileSearchTool::RankingOptions)) } def ranking_options end @@ -50,6 +56,9 @@ module OpenAI def ranking_options=(_) end + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). sig do params( vector_store_ids: T::Array[String], @@ -78,20 +87,15 @@ module OpenAI def to_hash end + # A filter to apply based on file attributes. class Filters < OpenAI::Union abstract! - class << self - sig do - override - .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)} } end class RankingOptions < OpenAI::BaseModel + # The ranker to use for the file search. sig { returns(T.nilable(Symbol)) } def ranker end @@ -100,6 +104,9 @@ module OpenAI def ranker=(_) end + # The score threshold for the file search, a number between 0 and 1. Numbers + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. sig { returns(T.nilable(Float)) } def score_threshold end @@ -108,6 +115,7 @@ module OpenAI def score_threshold=(_) end + # Ranking options for search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker: nil, score_threshold: nil) end @@ -116,17 +124,14 @@ module OpenAI def to_hash end + # The ranker to use for the file search. class Ranker < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/function_tool.rbi b/rbi/lib/openai/models/responses/function_tool.rbi index b1e8d293..8513be94 100644 --- a/rbi/lib/openai/models/responses/function_tool.rbi +++ b/rbi/lib/openai/models/responses/function_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class FunctionTool < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -12,6 +13,7 @@ module OpenAI def name=(_) end + # A JSON schema object describing the parameters of the function. sig { returns(T::Hash[Symbol, T.anything]) } def parameters end @@ -20,6 +22,7 @@ module OpenAI def parameters=(_) end + # Whether to enforce strict parameter validation. Default `true`. sig { returns(T::Boolean) } def strict end @@ -28,6 +31,7 @@ module OpenAI def strict=(_) end + # The type of the function tool. Always `function`. sig { returns(Symbol) } def type end @@ -36,6 +40,8 @@ module OpenAI def type=(_) end + # A description of the function. Used by the model to determine whether or not to + # call the function. sig { returns(T.nilable(String)) } def description end @@ -44,6 +50,9 @@ module OpenAI def description=(_) end + # Defines a function in your own code the model can choose to call. Learn more + # about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do params( name: String, diff --git a/rbi/lib/openai/models/responses/input_item_list_params.rbi b/rbi/lib/openai/models/responses/input_item_list_params.rbi index 8f16ac93..1162dc6a 100644 --- a/rbi/lib/openai/models/responses/input_item_list_params.rbi +++ b/rbi/lib/openai/models/responses/input_item_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # An item ID to list items after, used in pagination. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # An item ID to list items before, used in pagination. sig { returns(T.nilable(String)) } def before end @@ -23,6 +25,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -31,6 +35,10 @@ module OpenAI def limit=(_) end + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -67,17 +75,17 @@ module OpenAI def to_hash end + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. class Order < OpenAI::Enum abstract! 
+ Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response.rbi b/rbi/lib/openai/models/responses/response.rbi index c4764143..b7062e57 100644 --- a/rbi/lib/openai/models/responses/response.rbi +++ b/rbi/lib/openai/models/responses/response.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class Response < OpenAI::BaseModel + # Unique identifier for this Response. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # Unix timestamp (in seconds) of when this Response was created. sig { returns(Float) } def created_at end @@ -20,6 +22,7 @@ module OpenAI def created_at=(_) end + # An error object returned when the model fails to generate a Response. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseError)) } def error end @@ -31,6 +34,7 @@ module OpenAI def error=(_) end + # Details about why the response is incomplete. sig { returns(T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails)) } def incomplete_details end @@ -42,6 +46,12 @@ module OpenAI def incomplete_details=(_) end + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } def instructions end @@ -50,6 +60,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -58,6 +74,11 @@ module OpenAI def metadata=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -66,6 +87,7 @@ module OpenAI def model=(_) end + # The object type of this resource - always set to `response`. sig { returns(Symbol) } def object end @@ -74,6 +96,13 @@ module OpenAI def object=(_) end + # An array of content items generated by the model. + # + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. sig do returns( T::Array[ @@ -120,6 +149,7 @@ module OpenAI def output=(_) end + # Whether to allow the model to run tool calls in parallel. sig { returns(T::Boolean) } def parallel_tool_calls end @@ -128,6 +158,10 @@ module OpenAI def parallel_tool_calls=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic.
We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } def temperature end @@ -136,6 +170,9 @@ module OpenAI def temperature=(_) end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) @@ -155,6 +192,20 @@ module OpenAI def tool_choice=(_) end + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T::Array[ @@ -195,6 +246,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -203,6 +259,9 @@ module OpenAI def top_p=(_) end + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_output_tokens end @@ -211,6 +270,9 @@ module OpenAI def max_output_tokens=(_) end + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). sig { returns(T.nilable(String)) } def previous_response_id end @@ -219,6 +281,10 @@ module OpenAI def previous_response_id=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } def reasoning end @@ -227,6 +293,8 @@ module OpenAI def reasoning=(_) end + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, or `incomplete`. sig { returns(T.nilable(Symbol)) } def status end @@ -235,6 +303,11 @@ module OpenAI def status=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } def text end @@ -246,6 +319,13 @@ module OpenAI def text=(_) end + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(Symbol)) } def truncation end @@ -254,6 +334,8 @@ module OpenAI def truncation=(_) end + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseUsage)) } def usage end @@ -262,6 +344,9 @@ module OpenAI def usage=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -388,6 +473,7 @@ module OpenAI end class IncompleteDetails < OpenAI::BaseModel + # The reason why the response is incomplete. sig { returns(T.nilable(Symbol)) } def reason end @@ -396,6 +482,7 @@ module OpenAI def reason=(_) end + # Details about why the response is incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason: nil) end @@ -404,56 +491,55 @@ module OpenAI def to_hash end + # The reason why the response is incomplete. class Reason < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + MAX_OUTPUT_TOKENS = :max_output_tokens CONTENT_FILTER = :content_filter - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. class ToolChoice < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[NilClass, Symbol], [NilClass, OpenAI::Models::Responses::ToolChoiceTypes], [NilClass, OpenAI::Models::Responses::ToolChoiceFunction]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) + } end end + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. class Truncation < OpenAI::Enum abstract! 
- AUTO = T.let(:auto, T.nilable(Symbol)) - DISABLED = T.let(:disabled, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + AUTO = :auto + DISABLED = :disabled end end end diff --git a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_delta_event.rbi index cecdd81e..54ec9c86 100644 --- a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioDeltaEvent < OpenAI::BaseModel + # A chunk of Base64 encoded response audio bytes. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The type of the event. Always `response.audio.delta`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial audio response. sig { params(delta: String, type: Symbol).returns(T.attached_class) } def self.new(delta:, type: :"response.audio.delta") end diff --git a/rbi/lib/openai/models/responses/response_audio_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_done_event.rbi index c67012a1..d60d8ffe 100644 --- a/rbi/lib/openai/models/responses/response_audio_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioDoneEvent < OpenAI::BaseModel + # The type of the event. Always `response.audio.done`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Emitted when the audio response is complete. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :"response.audio.done") end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi index fe5f4c18..072b6541 100644 --- a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioTranscriptDeltaEvent < OpenAI::BaseModel + # The partial transcript of the audio response. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The type of the event. Always `response.audio.transcript.delta`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial transcript of audio. sig { params(delta: String, type: Symbol).returns(T.attached_class) } def self.new(delta:, type: :"response.audio.transcript.delta") end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi index 97204636..940f3497 100644 --- a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioTranscriptDoneEvent < OpenAI::BaseModel + # The type of the event. Always `response.audio.transcript.done`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Emitted when the full audio transcript is completed. 
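Taken together, these event classes suggest a dispatch pattern like the following when consuming a stream (the `stream` enumerable of events is assumed here; only the event types come from this diff):

```ruby
# Print the transcript as it arrives, then end the line once the
# full transcript is complete.
stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent
    print(event.delta)
  when OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent
    puts
  end
end
```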
sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :"response.audio.transcript.done") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi index 15a3e9c4..21d55044 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::BaseModel + # The partial code snippet added by the code interpreter. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The index of the output item in which the code interpreter call is in progress. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.code.delta`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a partial code snippet is added by the code interpreter. sig { params(delta: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(delta:, output_index:, type: :"response.code_interpreter_call.code.delta") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi index 25f31749..294664ab 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::BaseModel + # The final code snippet output by the code interpreter. sig { returns(String) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # The index of the output item in which the code interpreter call is in progress. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.code.done`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when code snippet output is finalized by the code interpreter. sig { params(code: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(code:, output_index:, type: :"response.code_interpreter_call.code.done") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi index 93ae27fe..389d9f49 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item in which the code interpreter call is in progress.
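The `output_index` field above is what lets a consumer stitch partial code snippets back together; a sketch under the same assumed `stream` enumerable:

```ruby
# Accumulate partial code per output item, preferring the finalized
# snippet once the `done` event arrives.
code_by_item = Hash.new { |hash, key| hash[key] = +"" }
stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent
    code_by_item[event.output_index] << event.delta
  when OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent
    code_by_item[event.output_index] = event.code
  end
end
```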
sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.completed`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when the code interpreter call is completed. sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi index 815750a0..9d0d0524 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallInProgressEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item in which the code interpreter call is in progress. sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.in_progress`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when a code interpreter call is in progress. sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi index fa22f0e6..4757018f 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item in which the code interpreter call is in progress. sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.interpreting`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when the code interpreter is actively interpreting the code snippet. sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi index d1a61cfa..b68befef 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterToolCall < OpenAI::BaseModel + # The unique ID of the code interpreter tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The code to run.
sig { returns(String) } def code end @@ -20,6 +22,7 @@ module OpenAI def code=(_) end + # The results of the code interpreter tool call. sig do returns( T::Array[ @@ -54,6 +57,7 @@ module OpenAI def results=(_) end + # The status of the code interpreter tool call. sig { returns(Symbol) } def status end @@ -62,6 +66,7 @@ module OpenAI def status=(_) end + # The type of the code interpreter tool call. Always `code_interpreter_call`. sig { returns(Symbol) } def type end @@ -70,6 +75,7 @@ module OpenAI def type=(_) end + # A tool call to run code. sig do params( id: String, @@ -108,10 +114,21 @@ module OpenAI def to_hash end + # The output of a code interpreter tool call: either text logs or files. class Result < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, + OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files + ) + } + end + class Logs < OpenAI::BaseModel + # The logs of the code interpreter tool call. sig { returns(String) } def logs end @@ -120,6 +137,7 @@ module OpenAI def logs=(_) end + # The type of the code interpreter text output. Always `logs`. sig { returns(Symbol) } def type end @@ -128,6 +146,7 @@ module OpenAI def type=(_) end + # The output of a code interpreter tool call that is text. sig { params(logs: String, type: Symbol).returns(T.attached_class) } def self.new(logs:, type: :logs) end @@ -149,6 +168,7 @@ module OpenAI def files=(_) end + # The type of the code interpreter file output. Always `files`. sig { returns(Symbol) } def type end @@ -157,6 +177,7 @@ module OpenAI def type=(_) end + # The output of a code interpreter tool call that is a file. sig do params( files: T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File], @@ -180,6 +201,7 @@ module OpenAI end class File < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -188,6 +210,7 @@ module OpenAI def file_id=(_) end + # The MIME type of the file. sig { returns(String) } def mime_type end @@ -205,31 +228,17 @@ module OpenAI end end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files]] - ) - end - private def variants - end - end end + # The status of the code interpreter tool call. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress INTERPRETING = :interpreting COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_completed_event.rbi b/rbi/lib/openai/models/responses/response_completed_event.rbi index 6ae602db..7db04649 100644 --- a/rbi/lib/openai/models/responses/response_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCompletedEvent < OpenAI::BaseModel + # Properties of the completed response. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.completed`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when the model response is complete.
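Because `results` is a union of logs and files, reading a finished tool call is a matter of branching on the variant; a sketch using the accessors defined above (the `call` variable is assumed to be a completed `ResponseCodeInterpreterToolCall`):

```ruby
call.results.each do |result|
  case result
  when OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs
    puts(result.logs)
  when OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files
    result.files.each { |file| puts("#{file.file_id} (#{file.mime_type})") }
  end
end
```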
sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.completed") end diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi index 9360dc6c..bc8782dc 100644 --- a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseComputerToolCall < OpenAI::BaseModel + # The unique ID of the computer call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The action to perform, such as a click, drag, or keypress. sig do returns( T.any( @@ -61,6 +63,7 @@ module OpenAI def action=(_) end + # An identifier used when responding to the tool call with output. sig { returns(String) } def call_id end @@ -69,6 +72,7 @@ module OpenAI def call_id=(_) end + # The pending safety checks for the computer call. sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]) } def pending_safety_checks end @@ -80,6 +84,8 @@ module OpenAI def pending_safety_checks=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(Symbol) } def status end @@ -88,6 +94,7 @@ module OpenAI def status=(_) end + # The type of the computer call. Always `computer_call`. sig { returns(Symbol) } def type end @@ -96,6 +103,9 @@ module OpenAI def type=(_) end + # A tool call to a computer use tool. See the + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + # for more information. sig do params( id: String, @@ -146,10 +156,29 @@ module OpenAI def to_hash end + # An action to perform on a computer, such as a click, drag, or keypress. class Action < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, + OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait + ) + } + end + class Click < OpenAI::BaseModel + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. sig { returns(Symbol) } def button end @@ -158,6 +187,8 @@ module OpenAI def button=(_) end + # Specifies the event type. For a click action, this property is always set to + # `click`. sig { returns(Symbol) } def type end @@ -166,6 +197,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the click occurred. sig { returns(Integer) } def x end @@ -174,6 +206,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the click occurred. sig { returns(Integer) } def y_ end @@ -182,6 +215,7 @@ module OpenAI def y_=(_) end + # A click action. sig { params(button: Symbol, x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(button:, x:, y_:, type: :click) end @@ -190,24 +224,24 @@ module OpenAI def to_hash end + # Indicates which mouse button was pressed during the click.
One of `left`, + # `right`, `wheel`, `back`, or `forward`. class Button < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + LEFT = :left RIGHT = :right WHEEL = :wheel BACK = :back FORWARD = :forward - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class DoubleClick < OpenAI::BaseModel + # Specifies the event type. For a double click action, this property is always set + # to `double_click`. sig { returns(Symbol) } def type end @@ -216,6 +250,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the double click occurred. sig { returns(Integer) } def x end @@ -224,6 +259,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the double click occurred. sig { returns(Integer) } def y_ end @@ -232,6 +268,7 @@ module OpenAI def y_=(_) end + # A double click action. sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(x:, y_:, type: :double_click) end @@ -242,6 +279,15 @@ module OpenAI end class Drag < OpenAI::BaseModel + # An array of coordinates representing the path of the drag action. Coordinates + # will appear as an array of objects, eg + # + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) } def path end @@ -253,6 +299,8 @@ module OpenAI def path=(_) end + # Specifies the event type. For a drag action, this property is always set to + # `drag`. sig { returns(Symbol) } def type end @@ -261,6 +309,7 @@ module OpenAI def type=(_) end + # A drag action. sig do params( path: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], @@ -281,6 +330,7 @@ module OpenAI end class Path < OpenAI::BaseModel + # The x-coordinate. sig { returns(Integer) } def x end @@ -289,6 +339,7 @@ module OpenAI def x=(_) end + # The y-coordinate. sig { returns(Integer) } def y_ end @@ -297,6 +348,7 @@ module OpenAI def y_=(_) end + # A series of x/y coordinate pairs in the drag path. sig { params(x: Integer, y_: Integer).returns(T.attached_class) } def self.new(x:, y_:) end @@ -308,6 +360,8 @@ module OpenAI end class Keypress < OpenAI::BaseModel + # The combination of keys the model is requesting to be pressed. This is an array + # of strings, each representing a key. sig { returns(T::Array[String]) } def keys end @@ -316,6 +370,8 @@ module OpenAI def keys=(_) end + # Specifies the event type. For a keypress action, this property is always set to + # `keypress`. sig { returns(Symbol) } def type end @@ -324,6 +380,7 @@ module OpenAI def type=(_) end + # A collection of keypresses the model would like to perform. sig { params(keys: T::Array[String], type: Symbol).returns(T.attached_class) } def self.new(keys:, type: :keypress) end @@ -334,6 +391,8 @@ module OpenAI end class Move < OpenAI::BaseModel + # Specifies the event type. For a move action, this property is always set to + # `move`. sig { returns(Symbol) } def type end @@ -342,6 +401,7 @@ module OpenAI def type=(_) end + # The x-coordinate to move to. sig { returns(Integer) } def x end @@ -350,6 +410,7 @@ module OpenAI def x=(_) end + # The y-coordinate to move to. sig { returns(Integer) } def y_ end @@ -358,6 +419,7 @@ module OpenAI def y_=(_) end + # A mouse move action. sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(x:, y_:, type: :move) end @@ -368,6 +430,8 @@ module OpenAI end class Screenshot < OpenAI::BaseModel + # Specifies the event type. 
For a screenshot action, this property is always set + # to `screenshot`. sig { returns(Symbol) } def type end @@ -376,6 +440,7 @@ module OpenAI def type=(_) end + # A screenshot action. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :screenshot) end @@ -386,6 +451,7 @@ module OpenAI end class Scroll < OpenAI::BaseModel + # The horizontal scroll distance. sig { returns(Integer) } def scroll_x end @@ -394,6 +460,7 @@ module OpenAI def scroll_x=(_) end + # The vertical scroll distance. sig { returns(Integer) } def scroll_y end @@ -402,6 +469,8 @@ module OpenAI def scroll_y=(_) end + # Specifies the event type. For a scroll action, this property is always set to + # `scroll`. sig { returns(Symbol) } def type end @@ -410,6 +479,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the scroll occurred. sig { returns(Integer) } def x end @@ -418,6 +488,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the scroll occurred. sig { returns(Integer) } def y_ end @@ -426,6 +497,7 @@ module OpenAI def y_=(_) end + # A scroll action. sig do params(scroll_x: Integer, scroll_y: Integer, x: Integer, y_: Integer, type: Symbol) .returns(T.attached_class) @@ -441,6 +513,7 @@ module OpenAI end class Type < OpenAI::BaseModel + # The text to type. sig { returns(String) } def text end @@ -449,6 +522,8 @@ module OpenAI def text=(_) end + # Specifies the event type. For a type action, this property is always set to + # `type`. sig { returns(Symbol) } def type end @@ -457,6 +532,7 @@ module OpenAI def type=(_) end + # An action to type in text. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :type) end @@ -467,6 +543,8 @@ module OpenAI end class Wait < OpenAI::BaseModel + # Specifies the event type. For a wait action, this property is always set to + # `wait`. sig { returns(Symbol) } def type end @@ -475,6 +553,7 @@ module OpenAI def type=(_) end + # A wait action. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :wait) end @@ -483,20 +562,10 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]] - ) - end - private def variants - end - end end class PendingSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -505,6 +574,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -513,6 +583,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -521,6 +592,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. 
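Dispatching on the `Action` union is the natural consumer-side counterpart to these definitions; in this sketch the `perform_*` helpers are hypothetical stand-ins for your own automation layer, while the accessors come from the signatures above:

```ruby
case call.action
when OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click
  # Note the y-coordinate accessor is `y_` in these definitions.
  perform_click(call.action.x, call.action.y_, button: call.action.button)
when OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot
  perform_screenshot
when OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait
  sleep(1)
end
```

Surfacing `call.pending_safety_checks` to a human reviewer before executing any action is the intended use of the safety-check fields documented above.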
sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -530,30 +602,25 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The type of the computer call. Always `computer_call`. class Type < OpenAI::Enum abstract! - COMPUTER_CALL = :computer_call + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + COMPUTER_CALL = :computer_call end end end diff --git a/rbi/lib/openai/models/responses/response_content.rbi b/rbi/lib/openai/models/responses/response_content.rbi index c2e6aed1..58f3f635 100644 --- a/rbi/lib/openai/models/responses/response_content.rbi +++ b/rbi/lib/openai/models/responses/response_content.rbi @@ -3,18 +3,20 @@ module OpenAI module Models module Responses + # Multi-modal input and output contents. class ResponseContent < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Responses::ResponseInputText], [NilClass, OpenAI::Models::Responses::ResponseInputImage], [NilClass, OpenAI::Models::Responses::ResponseInputFile], [NilClass, OpenAI::Models::Responses::ResponseOutputText], [NilClass, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseInputText, + OpenAI::Models::Responses::ResponseInputImage, + OpenAI::Models::Responses::ResponseInputFile, + OpenAI::Models::Responses::ResponseOutputText, + OpenAI::Models::Responses::ResponseOutputRefusal + ) + } end end end diff --git a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi b/rbi/lib/openai/models/responses/response_content_part_added_event.rbi index 6280062a..ca3acd5c 100644 --- a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi +++ b/rbi/lib/openai/models/responses/response_content_part_added_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseContentPartAddedEvent < OpenAI::BaseModel + # The index of the content part that was added. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the content part was added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the content part was added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The content part that was added. sig do returns( T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) @@ -47,6 +51,7 @@ module OpenAI def part=(_) end + # The type of the event. Always `response.content_part.added`. sig { returns(Symbol) } def type end @@ -55,6 +60,7 @@ module OpenAI def type=(_) end + # Emitted when a new content part is added. sig do params( content_index: Integer, @@ -83,18 +89,14 @@ module OpenAI def to_hash end + # The content part that was added. class Part < OpenAI::Union abstract! 
- class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) + } end end end diff --git a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi b/rbi/lib/openai/models/responses/response_content_part_done_event.rbi index 01ea5776..65977cbe 100644 --- a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_content_part_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseContentPartDoneEvent < OpenAI::BaseModel + # The index of the content part that is done. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the content part was added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the content part was added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The content part that is done. sig do returns( T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) @@ -47,6 +51,7 @@ module OpenAI def part=(_) end + # The type of the event. Always `response.content_part.done`. sig { returns(Symbol) } def type end @@ -55,6 +60,7 @@ module OpenAI def type=(_) end + # Emitted when a content part is done. sig do params( content_index: Integer, @@ -83,18 +89,14 @@ module OpenAI def to_hash end + # The content part that is done. class Part < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) + } end end end diff --git a/rbi/lib/openai/models/responses/response_create_params.rbi b/rbi/lib/openai/models/responses/response_create_params.rbi index fe1d6f07..f7396668 100644 --- a/rbi/lib/openai/models/responses/response_create_params.rbi +++ b/rbi/lib/openai/models/responses/response_create_params.rbi @@ -7,6 +7,15 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInput)) } def input end @@ -18,6 +27,11 @@ module OpenAI def input=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. 
Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -26,6 +40,14 @@ module OpenAI def model=(_) end + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image URLs from the input message. + # - `computer_call_output.output.image_url`: Include image URLs from the computer + # call output. sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -34,6 +56,12 @@ module OpenAI def include=(_) end + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } def instructions end @@ -42,6 +70,9 @@ module OpenAI def instructions=(_) end + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_output_tokens end @@ -50,6 +81,12 @@ module OpenAI def max_output_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -58,6 +95,7 @@ module OpenAI def metadata=(_) end + # Whether to allow the model to run tool calls in parallel. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -66,6 +104,9 @@ module OpenAI def parallel_tool_calls=(_) end + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). sig { returns(T.nilable(String)) } def previous_response_id end @@ -74,6 +115,10 @@ module OpenAI def previous_response_id=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } def reasoning end @@ -82,6 +127,7 @@ module OpenAI def reasoning=(_) end + # Whether to store the generated model response for later retrieval via API. sig { returns(T.nilable(T::Boolean)) } def store end @@ -90,6 +136,10 @@ module OpenAI def store=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } def temperature end @@ -98,6 +148,11 @@ module OpenAI def temperature=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data.
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } def text end @@ -109,6 +164,9 @@ module OpenAI def text=(_) end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.nilable( @@ -130,6 +188,20 @@ module OpenAI def tool_choice=(_) end + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T.nilable( @@ -172,6 +244,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -180,6 +257,13 @@ module OpenAI def top_p=(_) end + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(Symbol)) } def truncation end @@ -188,6 +272,9 @@ module OpenAI def truncation=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -283,52 +370,59 @@ module OpenAI def to_hash end + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) class Input < OpenAI::Union abstract! 
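Taken together, the parameters documented above surface through this generated request class. As a point of reference, a minimal call might look like the sketch below; the `responses.create` entry point and the keyword names are inferred from these RBI definitions rather than copied from a shipped example, so treat them as assumptions.

```ruby
require "openai"

# Hedged sketch: assumes the client exposes the Responses API as
# `responses.create` with keywords mirroring ResponseCreateParams.
client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

response = client.responses.create(
  model: "gpt-4o",                         # Model union: String or Symbol
  input: "Write one sentence about Ruby.", # Input union: String or ResponseInput
  instructions: "Answer tersely.",
  temperature: 0.2,                        # tune this or top_p, not both
  truncation: :auto                        # Truncation enum: :auto or :disabled
)
```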
- class << self - sig { override.returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInput]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, OpenAI::Models::Responses::ResponseInput)} } end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Symbol)} } end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. class ToolChoice < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[NilClass, Symbol], [NilClass, OpenAI::Models::Responses::ToolChoiceTypes], [NilClass, OpenAI::Models::Responses::ToolChoiceFunction]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) + } end end + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. class Truncation < OpenAI::Enum abstract! - AUTO = T.let(:auto, T.nilable(Symbol)) - DISABLED = T.let(:disabled, T.nilable(Symbol)) + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + AUTO = :auto + DISABLED = :disabled end end end diff --git a/rbi/lib/openai/models/responses/response_created_event.rbi b/rbi/lib/openai/models/responses/response_created_event.rbi index 2a500348..c68b3697 100644 --- a/rbi/lib/openai/models/responses/response_created_event.rbi +++ b/rbi/lib/openai/models/responses/response_created_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCreatedEvent < OpenAI::BaseModel + # The response that was created. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.created`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response is created. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.created") end diff --git a/rbi/lib/openai/models/responses/response_error.rbi b/rbi/lib/openai/models/responses/response_error.rbi index 367eea43..7f9b2db2 100644 --- a/rbi/lib/openai/models/responses/response_error.rbi +++ b/rbi/lib/openai/models/responses/response_error.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseError < OpenAI::BaseModel + # The error code for the response. 
sig { returns(Symbol) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -20,6 +22,7 @@ module OpenAI def message=(_) end + # An error object returned when the model fails to generate a Response. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -28,9 +31,12 @@ module OpenAI def to_hash end + # The error code for the response. class Code < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + SERVER_ERROR = :server_error RATE_LIMIT_EXCEEDED = :rate_limit_exceeded INVALID_PROMPT = :invalid_prompt @@ -49,12 +55,6 @@ module OpenAI EMPTY_IMAGE_FILE = :empty_image_file FAILED_TO_DOWNLOAD_IMAGE = :failed_to_download_image IMAGE_FILE_NOT_FOUND = :image_file_not_found - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_error_event.rbi b/rbi/lib/openai/models/responses/response_error_event.rbi index 03c5b3b3..f4c0e9f0 100644 --- a/rbi/lib/openai/models/responses/response_error_event.rbi +++ b/rbi/lib/openai/models/responses/response_error_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseErrorEvent < OpenAI::BaseModel + # The error code. sig { returns(T.nilable(String)) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # The error message. sig { returns(String) } def message end @@ -20,6 +22,7 @@ module OpenAI def message=(_) end + # The error parameter. sig { returns(T.nilable(String)) } def param end @@ -28,6 +31,7 @@ module OpenAI def param=(_) end + # The type of the event. Always `error`. sig { returns(Symbol) } def type end @@ -36,6 +40,7 @@ module OpenAI def type=(_) end + # Emitted when an error occurs. sig do params(code: T.nilable(String), message: String, param: T.nilable(String), type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_failed_event.rbi b/rbi/lib/openai/models/responses/response_failed_event.rbi index 5c3f69cb..c6d9fd32 100644 --- a/rbi/lib/openai/models/responses/response_failed_event.rbi +++ b/rbi/lib/openai/models/responses/response_failed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFailedEvent < OpenAI::BaseModel + # The response that failed. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.failed`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response fails. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.failed") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi index 77ecf89c..ffb5cae7 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel + # The ID of the output item that the file search call is initiated. 
sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the file search call is initiated. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.completed`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search call is completed (results found). sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.file_search_call.completed") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi index f4d4c09f..2feeebed 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel + # The ID of the output item that the file search call is initiated. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the file search call is initiated. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.in_progress`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search call is initiated. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.file_search_call.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi index 3ea7ffd2..b340e2ff 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel + # The ID of the output item that the file search call is initiated. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the file search call is searching. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.searching`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search is currently searching. 
sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) }
def self.new(item_id:, output_index:, type: :"response.file_search_call.searching")
end
diff --git a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi
index c0c8564d..17a4bdb2 100644
--- a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi
+++ b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi
@@ -4,6 +4,7 @@ module OpenAI
module Models
module Responses
class ResponseFileSearchToolCall < OpenAI::BaseModel
+ # The unique ID of the file search tool call.
sig { returns(String) }
def id
end
@@ -12,6 +13,7 @@ module OpenAI
def id=(_)
end
+ # The queries used to search for files.
sig { returns(T::Array[String]) }
def queries
end
@@ -20,6 +22,8 @@ module OpenAI
def queries=(_)
end
+ # The status of the file search tool call. One of `in_progress`, `searching`,
+ # `completed`, `incomplete`, or `failed`.
sig { returns(Symbol) }
def status
end
@@ -28,6 +32,7 @@ module OpenAI
def status=(_)
end
+ # The type of the file search tool call. Always `file_search_call`.
sig { returns(Symbol) }
def type
end
@@ -36,6 +41,7 @@ module OpenAI
def type=(_)
end
+ # The results of the file search tool call.
sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result])) }
def results
end
@@ -47,6 +53,9 @@ module OpenAI
def results=(_)
end
+ # The results of a file search tool call. See the
+ # [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
+ # for more information.
sig do
params(
id: String,
@@ -75,23 +84,26 @@ module OpenAI
def to_hash
end
+ # The status of the file search tool call. One of `in_progress`, `searching`,
+ # `completed`, `incomplete`, or `failed`.
class Status < OpenAI::Enum
abstract!
+ Value = type_template(:out) { {fixed: Symbol} }
+
IN_PROGRESS = :in_progress
SEARCHING = :searching
COMPLETED = :completed
INCOMPLETE = :incomplete
FAILED = :failed
-
- class << self
- sig { override.returns(T::Array[Symbol]) }
- def values
- end
- end
end
class Result < OpenAI::BaseModel
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard. Keys are strings with a maximum
+ # length of 64 characters. Values are strings with a maximum length of 512
+ # characters, booleans, or numbers.
sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) }
def attributes
end
@@ -103,6 +115,7 @@ module OpenAI
def attributes=(_)
end
+ # The unique ID of the file.
sig { returns(T.nilable(String)) }
def file_id
end
@@ -111,6 +124,7 @@ module OpenAI
def file_id=(_)
end
+ # The name of the file.
sig { returns(T.nilable(String)) }
def filename
end
@@ -119,6 +133,7 @@ module OpenAI
def filename=(_)
end
+ # The relevance score of the file - a value between 0 and 1.
sig { returns(T.nilable(Float)) }
def score
end
@@ -127,6 +142,7 @@ module OpenAI
def score=(_)
end
+ # The text that was retrieved from the file.
sig { returns(T.nilable(String)) }
def text
end
@@ -166,11 +182,7 @@ module OpenAI
class Attribute < OpenAI::Union
abstract!
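For context on the `Attribute` union rewritten just below: a result's `attributes` hash accepts `String`, `Float`, or `Boolean` values. A hedged sketch of constructing such a result, with illustrative values throughout:

```ruby
# Field names come from the RBI definitions above; the values are made up.
result = OpenAI::Models::Responses::ResponseFileSearchToolCall::Result.new(
  file_id: "file-abc123",
  filename: "notes.txt",
  score: 0.87,               # relevance, between 0 and 1
  text: "retrieved passage",
  attributes: {
    source: "wiki",          # String
    page: 12.0,              # Float
    reviewed: true           # Boolean
  }
)
```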
- class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/responses/response_format_text_config.rbi b/rbi/lib/openai/models/responses/response_format_text_config.rbi index 170c0610..f3595c51 100644 --- a/rbi/lib/openai/models/responses/response_format_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_config.rbi @@ -3,18 +3,30 @@ module OpenAI module Models module Responses + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. class ResponseFormatTextConfig < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::ResponseFormatText], [Symbol, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig], [Symbol, OpenAI::Models::ResponseFormatJSONObject]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::ResponseFormatText, + OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::Models::ResponseFormatJSONObject + ) + } end end end diff --git a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi index 4899b49f..aab69ea5 100644 --- a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). sig { returns(T::Hash[Symbol, T.anything]) } def schema end @@ -12,6 +14,7 @@ module OpenAI def schema=(_) end + # The type of response format being defined. Always `json_schema`. sig { returns(Symbol) } def type end @@ -20,6 +23,8 @@ module OpenAI def type=(_) end + # A description of what the response format is for, used by the model to determine + # how to respond in the format. sig { returns(T.nilable(String)) } def description end @@ -28,6 +33,8 @@ module OpenAI def description=(_) end + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. sig { returns(T.nilable(String)) } def name end @@ -36,6 +43,11 @@ module OpenAI def name=(_) end + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
sig { returns(T.nilable(T::Boolean)) } def strict end @@ -44,6 +56,9 @@ module OpenAI def strict=(_) end + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params( schema: T::Hash[Symbol, T.anything], diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi index c543936a..bd790e94 100644 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel + # The function-call arguments delta that is added. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the function-call arguments delta is added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the function-call arguments delta is added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.function_call_arguments.delta`. sig { returns(Symbol) } def type end @@ -36,6 +40,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial function-call arguments delta. sig do params( delta: String, diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi index 17234bf9..48684e3e 100644 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel + # The function-call arguments. sig { returns(String) } def arguments end @@ -12,6 +13,7 @@ module OpenAI def arguments=(_) end + # The ID of the item. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item. sig { returns(Integer) } def output_index end @@ -36,6 +39,7 @@ module OpenAI def type=(_) end + # Emitted when function-call arguments are finalized. sig do params( arguments: String, diff --git a/rbi/lib/openai/models/responses/response_function_tool_call.rbi b/rbi/lib/openai/models/responses/response_function_tool_call.rbi index 97c8db84..a92d38ed 100644 --- a/rbi/lib/openai/models/responses/response_function_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_function_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionToolCall < OpenAI::BaseModel + # The unique ID of the function tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # A JSON string of the arguments to pass to the function. sig { returns(String) } def arguments end @@ -20,6 +22,7 @@ module OpenAI def arguments=(_) end + # The unique ID of the function tool call generated by the model. sig { returns(String) } def call_id end @@ -28,6 +31,7 @@ module OpenAI def call_id=(_) end + # The name of the function to run. 
sig { returns(String) } def name end @@ -36,6 +40,7 @@ module OpenAI def name=(_) end + # The type of the function tool call. Always `function_call`. sig { returns(Symbol) } def type end @@ -44,6 +49,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -52,6 +59,9 @@ module OpenAI def status=(_) end + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. sig do params(id: String, arguments: String, call_id: String, name: String, status: Symbol, type: Symbol) .returns(T.attached_class) @@ -73,18 +83,16 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_function_web_search.rbi b/rbi/lib/openai/models/responses/response_function_web_search.rbi index fc8ec7a6..fa36c718 100644 --- a/rbi/lib/openai/models/responses/response_function_web_search.rbi +++ b/rbi/lib/openai/models/responses/response_function_web_search.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionWebSearch < OpenAI::BaseModel + # The unique ID of the web search tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The status of the web search tool call. sig { returns(Symbol) } def status end @@ -20,6 +22,7 @@ module OpenAI def status=(_) end + # The type of the web search tool call. Always `web_search_call`. sig { returns(Symbol) } def type end @@ -28,6 +31,9 @@ module OpenAI def type=(_) end + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # more information. sig { params(id: String, status: Symbol, type: Symbol).returns(T.attached_class) } def self.new(id:, status:, type: :web_search_call) end @@ -36,19 +42,16 @@ module OpenAI def to_hash end + # The status of the web search tool call. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress SEARCHING = :searching COMPLETED = :completed FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_in_progress_event.rbi index ad20d756..57a31950 100644 --- a/rbi/lib/openai/models/responses/response_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInProgressEvent < OpenAI::BaseModel + # The response that is in progress. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.in_progress`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when the response is in progress. 
sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_includable.rbi b/rbi/lib/openai/models/responses/response_includable.rbi index 6eab634e..363cdad4 100644 --- a/rbi/lib/openai/models/responses/response_includable.rbi +++ b/rbi/lib/openai/models/responses/response_includable.rbi @@ -3,18 +3,22 @@ module OpenAI module Models module Responses + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. class ResponseIncludable < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + FILE_SEARCH_CALL_RESULTS = :"file_search_call.results" MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url" COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_incomplete_event.rbi b/rbi/lib/openai/models/responses/response_incomplete_event.rbi index ec47033d..9ca0c85e 100644 --- a/rbi/lib/openai/models/responses/response_incomplete_event.rbi +++ b/rbi/lib/openai/models/responses/response_incomplete_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseIncompleteEvent < OpenAI::BaseModel + # The response that was incomplete. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.incomplete`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response finishes as incomplete. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.incomplete") end diff --git a/rbi/lib/openai/models/responses/response_input_audio.rbi b/rbi/lib/openai/models/responses/response_input_audio.rbi index d7f1eee2..f49d5163 100644 --- a/rbi/lib/openai/models/responses/response_input_audio.rbi +++ b/rbi/lib/openai/models/responses/response_input_audio.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputAudio < OpenAI::BaseModel + # Base64-encoded audio data. sig { returns(String) } def data end @@ -12,6 +13,7 @@ module OpenAI def data=(_) end + # The format of the audio data. Currently supported formats are `mp3` and `wav`. sig { returns(Symbol) } def format_ end @@ -20,6 +22,7 @@ module OpenAI def format_=(_) end + # The type of the input item. Always `input_audio`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # An audio input to the model. sig { params(data: String, format_: Symbol, type: Symbol).returns(T.attached_class) } def self.new(data:, format_:, type: :input_audio) end @@ -36,17 +40,14 @@ module OpenAI def to_hash end + # The format of the audio data. Currently supported formats are `mp3` and `wav`. class Format < OpenAI::Enum abstract! 
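The `Format` enum here illustrates the recurring enum migration in this diff: the `class << self` block with a hand-written `values` signature is dropped, a `Value` type template fixes the member type, and the constants become plain Symbols instead of `T.let(..., T.nilable(Symbol))` expressions. In sketch form, with a hypothetical enum:

```ruby
# Illustrative only; `ExampleFormat` does not exist in the SDK.
class ExampleFormat < OpenAI::Enum
  abstract!

  # Fixes the enum's member type to Symbol via a covariant type template.
  Value = type_template(:out) { {fixed: Symbol} }

  MP3 = :mp3
  WAV = :wav
end
```

Since the members are now bare Symbols, call sites can pass either the constant or the symbol literal interchangeably.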
+ Value = type_template(:out) { {fixed: Symbol} } + MP3 = :mp3 WAV = :wav - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_input_content.rbi b/rbi/lib/openai/models/responses/response_input_content.rbi index 5857ddd3..957c8020 100644 --- a/rbi/lib/openai/models/responses/response_input_content.rbi +++ b/rbi/lib/openai/models/responses/response_input_content.rbi @@ -3,18 +3,18 @@ module OpenAI module Models module Responses + # A text input to the model. class ResponseInputContent < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseInputText], [Symbol, OpenAI::Models::Responses::ResponseInputImage], [Symbol, OpenAI::Models::Responses::ResponseInputFile]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseInputText, + OpenAI::Models::Responses::ResponseInputImage, + OpenAI::Models::Responses::ResponseInputFile + ) + } end end end diff --git a/rbi/lib/openai/models/responses/response_input_file.rbi b/rbi/lib/openai/models/responses/response_input_file.rbi index e6245523..cedf90ec 100644 --- a/rbi/lib/openai/models/responses/response_input_file.rbi +++ b/rbi/lib/openai/models/responses/response_input_file.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputFile < OpenAI::BaseModel + # The type of the input item. Always `input_file`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # The content of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_data end @@ -20,6 +22,7 @@ module OpenAI def file_data=(_) end + # The ID of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_id end @@ -28,6 +31,7 @@ module OpenAI def file_id=(_) end + # The name of the file to be sent to the model. sig { returns(T.nilable(String)) } def filename end @@ -36,6 +40,7 @@ module OpenAI def filename=(_) end + # A file input to the model. sig do params(file_data: String, file_id: String, filename: String, type: Symbol).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/responses/response_input_image.rbi b/rbi/lib/openai/models/responses/response_input_image.rbi index 62bd604a..f4e450ae 100644 --- a/rbi/lib/openai/models/responses/response_input_image.rbi +++ b/rbi/lib/openai/models/responses/response_input_image.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class ResponseInputImage < OpenAI::BaseModel + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. sig { returns(Symbol) } def detail end @@ -12,6 +14,7 @@ module OpenAI def detail=(_) end + # The type of the input item. Always `input_image`. sig { returns(Symbol) } def type end @@ -20,6 +23,7 @@ module OpenAI def type=(_) end + # The ID of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_id end @@ -28,6 +32,8 @@ module OpenAI def file_id=(_) end + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. sig { returns(T.nilable(String)) } def image_url end @@ -36,6 +42,8 @@ module OpenAI def image_url=(_) end + # An image input to the model. Learn about + # [image inputs](https://platform.openai.com/docs/guides/vision). 
sig do params(detail: Symbol, file_id: T.nilable(String), image_url: T.nilable(String), type: Symbol) .returns(T.attached_class) @@ -56,18 +64,16 @@ module OpenAI def to_hash end + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. class Detail < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + HIGH = :high LOW = :low AUTO = :auto - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_input_item.rbi b/rbi/lib/openai/models/responses/response_input_item.rbi index a2ac733a..fc0ede1f 100644 --- a/rbi/lib/openai/models/responses/response_input_item.rbi +++ b/rbi/lib/openai/models/responses/response_input_item.rbi @@ -3,10 +3,35 @@ module OpenAI module Models module Responses + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. class ResponseInputItem < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::EasyInputMessage, + OpenAI::Models::Responses::ResponseInputItem::Message, + OpenAI::Models::Responses::ResponseOutputMessage, + OpenAI::Models::Responses::ResponseFileSearchToolCall, + OpenAI::Models::Responses::ResponseComputerToolCall, + OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Models::Responses::ResponseFunctionWebSearch, + OpenAI::Models::Responses::ResponseFunctionToolCall, + OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Models::Responses::ResponseReasoningItem, + OpenAI::Models::Responses::ResponseInputItem::ItemReference + ) + } + end + class Message < OpenAI::BaseModel + # A list of one or many input items to the model, containing different content + # types. sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } def content end @@ -18,6 +43,7 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `system`, or `developer`. sig { returns(Symbol) } def role end @@ -26,6 +52,8 @@ module OpenAI def role=(_) end + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -34,6 +62,7 @@ module OpenAI def status=(_) end + # The type of the message input. Always set to `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -42,6 +71,9 @@ module OpenAI def type=(_) end + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. sig do params( content: OpenAI::Models::Responses::ResponseInputMessageContentList, @@ -63,48 +95,41 @@ module OpenAI def to_hash end + # The role of the message input. One of `user`, `system`, or `developer`. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user SYSTEM = :system DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The type of the message input. Always set to `message`. class Type < OpenAI::Enum abstract! - MESSAGE = :message + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + MESSAGE = :message end end class ComputerCallOutput < OpenAI::BaseModel + # The ID of the computer tool call that produced the output. sig { returns(String) } def call_id end @@ -113,6 +138,7 @@ module OpenAI def call_id=(_) end + # A computer screenshot image used with the computer use tool. sig { returns(OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output) } def output end @@ -124,6 +150,7 @@ module OpenAI def output=(_) end + # The type of the computer tool call output. Always `computer_call_output`. sig { returns(Symbol) } def type end @@ -132,6 +159,7 @@ module OpenAI def type=(_) end + # The ID of the computer tool call output. sig { returns(T.nilable(String)) } def id end @@ -140,6 +168,8 @@ module OpenAI def id=(_) end + # The safety checks reported by the API that have been acknowledged by the + # developer. sig do returns( T.nilable( @@ -161,6 +191,8 @@ module OpenAI def acknowledged_safety_checks=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -169,6 +201,7 @@ module OpenAI def status=(_) end + # The output of a computer tool call. sig do params( call_id: String, @@ -200,6 +233,8 @@ module OpenAI end class Output < OpenAI::BaseModel + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. sig { returns(Symbol) } def type end @@ -208,6 +243,7 @@ module OpenAI def type=(_) end + # The identifier of an uploaded file that contains the screenshot. sig { returns(T.nilable(String)) } def file_id end @@ -216,6 +252,7 @@ module OpenAI def file_id=(_) end + # The URL of the screenshot image. sig { returns(T.nilable(String)) } def image_url end @@ -224,6 +261,7 @@ module OpenAI def image_url=(_) end + # A computer screenshot image used with the computer use tool. sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) end @@ -234,6 +272,7 @@ module OpenAI end class AcknowledgedSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -242,6 +281,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -250,6 +290,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -258,6 +299,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -267,22 +309,21 @@ module OpenAI end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! 
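As a usage note for the `Message` item typed earlier in this file, here is a hedged construction sketch; it assumes `BaseModel` subclasses accept their declared fields as keyword arguments, which matches the `self.new` signatures shown throughout:

```ruby
# Hypothetical developer-role message for a Responses request.
message = OpenAI::Models::Responses::ResponseInputItem::Message.new(
  content: [OpenAI::Models::Responses::ResponseInputText.new(text: "Be terse.")],
  role: :developer  # Role enum: :user, :system, or :developer
)
```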
+ Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class FunctionCallOutput < OpenAI::BaseModel + # The unique ID of the function tool call generated by the model. sig { returns(String) } def call_id end @@ -291,6 +332,7 @@ module OpenAI def call_id=(_) end + # A JSON string of the output of the function tool call. sig { returns(String) } def output end @@ -299,6 +341,7 @@ module OpenAI def output=(_) end + # The type of the function tool call output. Always `function_call_output`. sig { returns(Symbol) } def type end @@ -307,6 +350,8 @@ module OpenAI def type=(_) end + # The unique ID of the function tool call output. Populated when this item is + # returned via API. sig { returns(T.nilable(String)) } def id end @@ -315,6 +360,8 @@ module OpenAI def id=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -323,6 +370,7 @@ module OpenAI def status=(_) end + # The output of a function tool call. sig do params( call_id: String, @@ -341,22 +389,21 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class ItemReference < OpenAI::BaseModel + # The ID of the item to reference. sig { returns(String) } def id end @@ -365,6 +412,7 @@ module OpenAI def id=(_) end + # The type of item to reference. Always `item_reference`. sig { returns(Symbol) } def type end @@ -373,6 +421,7 @@ module OpenAI def type=(_) end + # An internal identifier for an item to reference. sig { params(id: String, type: Symbol).returns(T.attached_class) } def self.new(id:, type: :item_reference) end @@ -381,17 +430,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::EasyInputMessage], [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message], [Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput], [Symbol, OpenAI::Models::Responses::ResponseReasoningItem], [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_input_text.rbi b/rbi/lib/openai/models/responses/response_input_text.rbi index df900197..16fc4040 100644 --- a/rbi/lib/openai/models/responses/response_input_text.rbi +++ b/rbi/lib/openai/models/responses/response_input_text.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputText < OpenAI::BaseModel + # The text input to the model. 
sig { returns(String) } def text end @@ -12,6 +13,7 @@ module OpenAI def text=(_) end + # The type of the input item. Always `input_text`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # A text input to the model. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :input_text) end diff --git a/rbi/lib/openai/models/responses/response_item_list.rbi b/rbi/lib/openai/models/responses/response_item_list.rbi index c24e41f8..f6e9982c 100644 --- a/rbi/lib/openai/models/responses/response_item_list.rbi +++ b/rbi/lib/openai/models/responses/response_item_list.rbi @@ -6,6 +6,7 @@ module OpenAI module Responses class ResponseItemList < OpenAI::BaseModel + # A list of items used to generate this response. sig do returns( T::Array[ @@ -58,6 +59,7 @@ module OpenAI def data=(_) end + # The ID of the first item in the list. sig { returns(String) } def first_id end @@ -66,6 +68,7 @@ module OpenAI def first_id=(_) end + # Whether there are more items available. sig { returns(T::Boolean) } def has_more end @@ -74,6 +77,7 @@ module OpenAI def has_more=(_) end + # The ID of the last item in the list. sig { returns(String) } def last_id end @@ -82,6 +86,7 @@ module OpenAI def last_id=(_) end + # The type of object returned, must be `list`. sig { returns(Symbol) } def object end @@ -90,6 +95,7 @@ module OpenAI def object=(_) end + # A list of Response items. sig do params( data: T::Array[ @@ -140,10 +146,27 @@ module OpenAI def to_hash end + # Content item used to generate a response. class Data < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseItemList::Data::Message, + OpenAI::Models::Responses::ResponseOutputMessage, + OpenAI::Models::Responses::ResponseFileSearchToolCall, + OpenAI::Models::Responses::ResponseComputerToolCall, + OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, + OpenAI::Models::Responses::ResponseFunctionWebSearch, + OpenAI::Models::Responses::ResponseFunctionToolCall, + OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput + ) + } + end + class Message < OpenAI::BaseModel + # The unique ID of the message input. sig { returns(String) } def id end @@ -152,6 +175,8 @@ module OpenAI def id=(_) end + # A list of one or many input items to the model, containing different content + # types. sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } def content end @@ -163,6 +188,7 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `system`, or `developer`. sig { returns(Symbol) } def role end @@ -171,6 +197,8 @@ module OpenAI def role=(_) end + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -179,6 +207,7 @@ module OpenAI def status=(_) end + # The type of the message input. Always set to `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -215,48 +244,41 @@ module OpenAI def to_hash end + # The role of the message input. One of `user`, `system`, or `developer`. class Role < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + USER = :user SYSTEM = :system DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # The type of the message input. Always set to `message`. class Type < OpenAI::Enum abstract! - MESSAGE = :message + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + MESSAGE = :message end end class ComputerCallOutput < OpenAI::BaseModel + # The unique ID of the computer call tool output. sig { returns(String) } def id end @@ -265,6 +287,7 @@ module OpenAI def id=(_) end + # The ID of the computer tool call that produced the output. sig { returns(String) } def call_id end @@ -273,6 +296,7 @@ module OpenAI def call_id=(_) end + # A computer screenshot image used with the computer use tool. sig { returns(OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output) } def output end @@ -284,6 +308,7 @@ module OpenAI def output=(_) end + # The type of the computer tool call output. Always `computer_call_output`. sig { returns(Symbol) } def type end @@ -292,6 +317,8 @@ module OpenAI def type=(_) end + # The safety checks reported by the API that have been acknowledged by the + # developer. sig do returns( T.nilable( @@ -313,6 +340,8 @@ module OpenAI def acknowledged_safety_checks=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -352,6 +381,8 @@ module OpenAI end class Output < OpenAI::BaseModel + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. sig { returns(Symbol) } def type end @@ -360,6 +391,7 @@ module OpenAI def type=(_) end + # The identifier of an uploaded file that contains the screenshot. sig { returns(T.nilable(String)) } def file_id end @@ -368,6 +400,7 @@ module OpenAI def file_id=(_) end + # The URL of the screenshot image. sig { returns(T.nilable(String)) } def image_url end @@ -376,6 +409,7 @@ module OpenAI def image_url=(_) end + # A computer screenshot image used with the computer use tool. sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) end @@ -386,6 +420,7 @@ module OpenAI end class AcknowledgedSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -394,6 +429,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -402,6 +438,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -410,6 +447,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -419,22 +457,21 @@ module OpenAI end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! 
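Several of the list's item types carry the same optional three-state `status`, and with enum members now plain Symbols a consumer can compare against literals directly. A sketch, where `page` stands in for a `ResponseItemList` fetched elsewhere:

```ruby
# Report any items that are not yet complete; not every variant defines
# `status`, hence the respond_to? guard, and it may be nil when unset.
page.data.each do |item|
  next unless item.respond_to?(:status) && item.status
  puts "#{item.class.name}: #{item.status}" unless item.status == :completed
end
```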
+ Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end class FunctionCallOutput < OpenAI::BaseModel + # The unique ID of the function call tool output. sig { returns(String) } def id end @@ -443,6 +480,7 @@ module OpenAI def id=(_) end + # The unique ID of the function tool call generated by the model. sig { returns(String) } def call_id end @@ -451,6 +489,7 @@ module OpenAI def call_id=(_) end + # A JSON string of the output of the function tool call. sig { returns(String) } def output end @@ -459,6 +498,7 @@ module OpenAI def output=(_) end + # The type of the function tool call output. Always `function_call_output`. sig { returns(Symbol) } def type end @@ -467,6 +507,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -493,29 +535,16 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message], [Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput]] - ) - end - private def variants end end end diff --git a/rbi/lib/openai/models/responses/response_output_audio.rbi b/rbi/lib/openai/models/responses/response_output_audio.rbi index 1aed1ddd..162e5138 100644 --- a/rbi/lib/openai/models/responses/response_output_audio.rbi +++ b/rbi/lib/openai/models/responses/response_output_audio.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputAudio < OpenAI::BaseModel + # Base64-encoded audio data from the model. sig { returns(String) } def data end @@ -12,6 +13,7 @@ module OpenAI def data=(_) end + # The transcript of the audio data from the model. sig { returns(String) } def transcript end @@ -20,6 +22,7 @@ module OpenAI def transcript=(_) end + # The type of the output audio. Always `output_audio`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # An audio output from the model. sig { params(data: String, transcript: String, type: Symbol).returns(T.attached_class) } def self.new(data:, transcript:, type: :output_audio) end diff --git a/rbi/lib/openai/models/responses/response_output_item.rbi b/rbi/lib/openai/models/responses/response_output_item.rbi index 8a2bf039..75949da7 100644 --- a/rbi/lib/openai/models/responses/response_output_item.rbi +++ b/rbi/lib/openai/models/responses/response_output_item.rbi @@ -3,18 +3,21 @@ module OpenAI module Models module Responses + # An output message from the model. 
class ResponseOutputItem < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseReasoningItem]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseOutputMessage, + OpenAI::Models::Responses::ResponseFileSearchToolCall, + OpenAI::Models::Responses::ResponseFunctionToolCall, + OpenAI::Models::Responses::ResponseFunctionWebSearch, + OpenAI::Models::Responses::ResponseComputerToolCall, + OpenAI::Models::Responses::ResponseReasoningItem + ) + } end end end diff --git a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi b/rbi/lib/openai/models/responses/response_output_item_added_event.rbi index 837047df..0e49a206 100644 --- a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi +++ b/rbi/lib/openai/models/responses/response_output_item_added_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputItemAddedEvent < OpenAI::BaseModel + # The output item that was added. sig do returns( T.any( @@ -44,6 +45,7 @@ module OpenAI def item=(_) end + # The index of the output item that was added. sig { returns(Integer) } def output_index end @@ -52,6 +54,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_item.added`. sig { returns(Symbol) } def type end @@ -60,6 +63,7 @@ module OpenAI def type=(_) end + # Emitted when a new output item is added. sig do params( item: T.any( diff --git a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi b/rbi/lib/openai/models/responses/response_output_item_done_event.rbi index 75961890..e53adef5 100644 --- a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_output_item_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputItemDoneEvent < OpenAI::BaseModel + # The output item that was marked done. sig do returns( T.any( @@ -44,6 +45,7 @@ module OpenAI def item=(_) end + # The index of the output item that was marked done. sig { returns(Integer) } def output_index end @@ -52,6 +54,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_item.done`. sig { returns(Symbol) } def type end @@ -60,6 +63,7 @@ module OpenAI def type=(_) end + # Emitted when an output item is marked done. sig do params( item: T.any( diff --git a/rbi/lib/openai/models/responses/response_output_message.rbi b/rbi/lib/openai/models/responses/response_output_message.rbi index d2cbf773..7fb7026c 100644 --- a/rbi/lib/openai/models/responses/response_output_message.rbi +++ b/rbi/lib/openai/models/responses/response_output_message.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputMessage < OpenAI::BaseModel + # The unique ID of the output message. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The content of the output message. 
sig do returns( T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] @@ -31,6 +33,7 @@ module OpenAI def content=(_) end + # The role of the output message. Always `assistant`. sig { returns(Symbol) } def role end @@ -39,6 +42,8 @@ module OpenAI def role=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(Symbol) } def status end @@ -47,6 +52,7 @@ module OpenAI def status=(_) end + # The type of the output message. Always `message`. sig { returns(Symbol) } def type end @@ -55,6 +61,7 @@ module OpenAI def type=(_) end + # An output message from the model. sig do params( id: String, @@ -83,33 +90,27 @@ module OpenAI def to_hash end + # A text output from the model. class Content < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) + } end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_output_refusal.rbi b/rbi/lib/openai/models/responses/response_output_refusal.rbi index f573eb83..1db3c101 100644 --- a/rbi/lib/openai/models/responses/response_output_refusal.rbi +++ b/rbi/lib/openai/models/responses/response_output_refusal.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputRefusal < OpenAI::BaseModel + # The refusal explanation from the model. sig { returns(String) } def refusal end @@ -12,6 +13,7 @@ module OpenAI def refusal=(_) end + # The type of the refusal. Always `refusal`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # A refusal from the model. sig { params(refusal: String, type: Symbol).returns(T.attached_class) } def self.new(refusal:, type: :refusal) end diff --git a/rbi/lib/openai/models/responses/response_output_text.rbi b/rbi/lib/openai/models/responses/response_output_text.rbi index 7f9e510e..e04d3988 100644 --- a/rbi/lib/openai/models/responses/response_output_text.rbi +++ b/rbi/lib/openai/models/responses/response_output_text.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputText < OpenAI::BaseModel + # The annotations of the text output. sig do returns( T::Array[ @@ -41,6 +42,7 @@ module OpenAI def annotations=(_) end + # The text output from the model. sig { returns(String) } def text end @@ -49,6 +51,7 @@ module OpenAI def text=(_) end + # The type of the output text. Always `output_text`. sig { returns(Symbol) } def type end @@ -57,6 +60,7 @@ module OpenAI def type=(_) end + # A text output from the model. sig do params( annotations: T::Array[ @@ -93,10 +97,22 @@ module OpenAI def to_hash end + # A citation to a file. class Annotation < OpenAI::Union abstract!
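# A branching sketch over the annotation variants declared below
# (illustrative only; `annotation` is assumed to be one of the three
# citation classes):
#
#   case annotation
#   in OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation
#     puts "file #{annotation.file_id} cited at index #{annotation.index}"
#   in OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation
#     puts "#{annotation.title}: #{annotation.url}"
#   in OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath
#     puts "file path #{annotation.file_id}"
#   end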
+ Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, + OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, + OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath + ) + } + end + class FileCitation < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -105,6 +121,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -113,6 +130,7 @@ module OpenAI def index=(_) end + # The type of the file citation. Always `file_citation`. sig { returns(Symbol) } def type end @@ -121,6 +139,7 @@ module OpenAI def type=(_) end + # A citation to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_citation) end @@ -131,6 +150,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. sig { returns(Integer) } def end_index end @@ -139,6 +159,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -147,6 +168,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -155,6 +177,7 @@ module OpenAI def title=(_) end + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -163,6 +186,7 @@ module OpenAI def type=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -171,6 +195,7 @@ module OpenAI def url=(_) end + # A citation for a web resource used to generate a model response. sig do params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) .returns(T.attached_class) @@ -194,6 +219,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -202,6 +228,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -210,6 +237,7 @@ module OpenAI def index=(_) end + # The type of the file path. Always `file_path`. sig { returns(Symbol) } def type end @@ -218,6 +246,7 @@ module OpenAI def type=(_) end + # A path to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_path) end @@ -226,17 +255,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation], [Symbol, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation], [Symbol, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_reasoning_item.rbi b/rbi/lib/openai/models/responses/response_reasoning_item.rbi index 4733fee4..66b69b58 100644 --- a/rbi/lib/openai/models/responses/response_reasoning_item.rbi +++ b/rbi/lib/openai/models/responses/response_reasoning_item.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseReasoningItem < OpenAI::BaseModel + # The unique identifier of the reasoning content. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # Reasoning text contents. 
sig { returns(T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary]) } def summary end @@ -23,6 +25,7 @@ module OpenAI def summary=(_) end + # The type of the object. Always `reasoning`. sig { returns(Symbol) } def type end @@ -31,6 +34,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -39,6 +44,8 @@ module OpenAI def status=(_) end + # A description of the chain of thought used by a reasoning model while generating + # a response. sig do params( id: String, @@ -66,6 +73,7 @@ module OpenAI end class Summary < OpenAI::BaseModel + # A short summary of the reasoning used by the model when generating the response. sig { returns(String) } def text end @@ -74,6 +82,7 @@ module OpenAI def text=(_) end + # The type of the object. Always `summary_text`. sig { returns(Symbol) } def type end @@ -91,18 +100,16 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi b/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi index 7517aba1..f4a53f02 100644 --- a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseRefusalDeltaEvent < OpenAI::BaseModel + # The index of the content part that the refusal text is added to. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The refusal text that is added. sig { returns(String) } def delta end @@ -20,6 +22,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the refusal text is added to. sig { returns(String) } def item_id end @@ -28,6 +31,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the refusal text is added to. sig { returns(Integer) } def output_index end @@ -36,6 +40,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.refusal.delta`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial refusal text. sig do params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi b/rbi/lib/openai/models/responses/response_refusal_done_event.rbi index 560a27ef..1e4bf80b 100644 --- a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_refusal_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseRefusalDoneEvent < OpenAI::BaseModel + # The index of the content part that the refusal text is finalized. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the refusal text is finalized. 
sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the refusal text is finalized. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The refusal text that is finalized. sig { returns(String) } def refusal end @@ -36,6 +40,7 @@ module OpenAI def refusal=(_) end + # The type of the event. Always `response.refusal.done`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when refusal text is finalized. sig do params( content_index: Integer, diff --git a/rbi/lib/openai/models/responses/response_retrieve_params.rbi b/rbi/lib/openai/models/responses/response_retrieve_params.rbi index 229f8fbb..8e817f0d 100644 --- a/rbi/lib/openai/models/responses/response_retrieve_params.rbi +++ b/rbi/lib/openai/models/responses/response_retrieve_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end diff --git a/rbi/lib/openai/models/responses/response_status.rbi b/rbi/lib/openai/models/responses/response_status.rbi index 11ae0a2a..f6a3f6ce 100644 --- a/rbi/lib/openai/models/responses/response_status.rbi +++ b/rbi/lib/openai/models/responses/response_status.rbi @@ -3,19 +3,17 @@ module OpenAI module Models module Responses + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, or `incomplete`. class ResponseStatus < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + COMPLETED = :completed FAILED = :failed IN_PROGRESS = :in_progress INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_stream_event.rbi b/rbi/lib/openai/models/responses/response_stream_event.rbi index a0f3b82c..0b1bb12f 100644 --- a/rbi/lib/openai/models/responses/response_stream_event.rbi +++ b/rbi/lib/openai/models/responses/response_stream_event.rbi @@ -3,18 +3,47 @@ module OpenAI module Models module Responses + # Emitted when there is a partial audio response. class ResponseStreamEvent < OpenAI::Union abstract! 
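# A consumption sketch (illustrative only; assumes a stream yielding
# already-coerced events, e.g. from the SDK's streaming responses API):
#
#   stream.each do |event|
#     case event
#     in OpenAI::Models::Responses::ResponseTextDeltaEvent
#       print event.delta
#     in OpenAI::Models::Responses::ResponseCompletedEvent
#       puts
#     else
#       nil # ignore the remaining event types
#     end
#   end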
- class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseAudioDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent], [Symbol, OpenAI::Models::Responses::ResponseCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseContentPartAddedEvent], [Symbol, OpenAI::Models::Responses::ResponseContentPartDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCreatedEvent], [Symbol, OpenAI::Models::Responses::ResponseErrorEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent], [Symbol, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseFailedEvent], [Symbol, OpenAI::Models::Responses::ResponseIncompleteEvent], [Symbol, OpenAI::Models::Responses::ResponseOutputItemAddedEvent], [Symbol, OpenAI::Models::Responses::ResponseOutputItemDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseRefusalDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseRefusalDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseTextDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseTextDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseAudioDeltaEvent, + OpenAI::Models::Responses::ResponseAudioDoneEvent, + OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, + OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, + OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, + OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, + OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, + OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, + OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, + OpenAI::Models::Responses::ResponseCompletedEvent, + OpenAI::Models::Responses::ResponseContentPartAddedEvent, + OpenAI::Models::Responses::ResponseContentPartDoneEvent, + OpenAI::Models::Responses::ResponseCreatedEvent, + OpenAI::Models::Responses::ResponseErrorEvent, + OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, + OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, + OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, + OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, + 
OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, + OpenAI::Models::Responses::ResponseInProgressEvent, + OpenAI::Models::Responses::ResponseFailedEvent, + OpenAI::Models::Responses::ResponseIncompleteEvent, + OpenAI::Models::Responses::ResponseOutputItemAddedEvent, + OpenAI::Models::Responses::ResponseOutputItemDoneEvent, + OpenAI::Models::Responses::ResponseRefusalDeltaEvent, + OpenAI::Models::Responses::ResponseRefusalDoneEvent, + OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent, + OpenAI::Models::Responses::ResponseTextDeltaEvent, + OpenAI::Models::Responses::ResponseTextDoneEvent, + OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, + OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, + OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent + ) + } end end end diff --git a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi index 2d60b64e..00dfd4c2 100644 --- a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextAnnotationDeltaEvent < OpenAI::BaseModel + # A citation to a file. sig do returns( T.any( @@ -35,6 +36,7 @@ module OpenAI def annotation=(_) end + # The index of the annotation that was added. sig { returns(Integer) } def annotation_index end @@ -43,6 +45,7 @@ module OpenAI def annotation_index=(_) end + # The index of the content part that the text annotation was added to. sig { returns(Integer) } def content_index end @@ -51,6 +54,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the text annotation was added to. sig { returns(String) } def item_id end @@ -59,6 +63,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text annotation was added to. sig { returns(Integer) } def output_index end @@ -67,6 +72,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_text.annotation.added`. sig { returns(Symbol) } def type end @@ -75,6 +81,7 @@ module OpenAI def type=(_) end + # Emitted when a text annotation is added. sig do params( annotation: T.any( @@ -120,10 +127,22 @@ module OpenAI def to_hash end + # A citation to a file. class Annotation < OpenAI::Union abstract! + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, + OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, + OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath + ) + } + end + class FileCitation < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -132,6 +151,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -140,6 +160,7 @@ module OpenAI def index=(_) end + # The type of the file citation. Always `file_citation`. sig { returns(Symbol) } def type end @@ -148,6 +169,7 @@ module OpenAI def type=(_) end + # A citation to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_citation) end @@ -158,6 +180,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. 
sig { returns(Integer) } def end_index end @@ -166,6 +189,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -174,6 +198,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -182,6 +207,7 @@ module OpenAI def title=(_) end + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -190,6 +216,7 @@ module OpenAI def type=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -198,6 +225,7 @@ module OpenAI def url=(_) end + # A citation for a web resource used to generate a model response. sig do params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) .returns(T.attached_class) @@ -221,6 +249,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -229,6 +258,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -237,6 +267,7 @@ module OpenAI def index=(_) end + # The type of the file path. Always `file_path`. sig { returns(Symbol) } def type end @@ -245,6 +276,7 @@ module OpenAI def type=(_) end + # A path to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_path) end @@ -253,17 +285,6 @@ module OpenAI def to_hash end end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath]] - ) - end - private def variants - end - end end end end diff --git a/rbi/lib/openai/models/responses/response_text_config.rbi b/rbi/lib/openai/models/responses/response_text_config.rbi index a04a35f9..2287e496 100644 --- a/rbi/lib/openai/models/responses/response_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_text_config.rbi @@ -4,6 +4,19 @@ module OpenAI module Models module Responses class ResponseTextConfig < OpenAI::BaseModel + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. sig do returns( T.nilable( @@ -37,6 +50,11 @@ module OpenAI def format_=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig do params( format_: T.any( diff --git a/rbi/lib/openai/models/responses/response_text_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_delta_event.rbi index acc2fa04..50307018 100644 --- a/rbi/lib/openai/models/responses/response_text_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextDeltaEvent < OpenAI::BaseModel + # The index of the content part that the text delta was added to. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The text delta that was added. sig { returns(String) } def delta end @@ -20,6 +22,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the text delta was added to. sig { returns(String) } def item_id end @@ -28,6 +31,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text delta was added to. sig { returns(Integer) } def output_index end @@ -36,6 +40,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_text.delta`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when there is an additional text delta. sig do params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_text_done_event.rbi b/rbi/lib/openai/models/responses/response_text_done_event.rbi index 8b2ece95..56e6ddd7 100644 --- a/rbi/lib/openai/models/responses/response_text_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextDoneEvent < OpenAI::BaseModel + # The index of the content part that the text content is finalized. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the text content is finalized. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text content is finalized. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The text content that is finalized. sig { returns(String) } def text end @@ -36,6 +40,7 @@ module OpenAI def text=(_) end + # The type of the event. Always `response.output_text.done`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when text content is finalized. sig do params(content_index: Integer, item_id: String, output_index: Integer, text: String, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_usage.rbi b/rbi/lib/openai/models/responses/response_usage.rbi index 87f7c238..b46f45aa 100644 --- a/rbi/lib/openai/models/responses/response_usage.rbi +++ b/rbi/lib/openai/models/responses/response_usage.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseUsage < OpenAI::BaseModel + # The number of input tokens. sig { returns(Integer) } def input_tokens end @@ -12,6 +13,7 @@ module OpenAI def input_tokens=(_) end + # The number of output tokens. 
sig { returns(Integer) } def output_tokens end @@ -20,6 +22,7 @@ module OpenAI def output_tokens=(_) end + # A detailed breakdown of the output tokens. sig { returns(OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails) } def output_tokens_details end @@ -31,6 +34,7 @@ module OpenAI def output_tokens_details=(_) end + # The total number of tokens used. sig { returns(Integer) } def total_tokens end @@ -39,6 +43,8 @@ module OpenAI def total_tokens=(_) end + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. sig do params( input_tokens: Integer, @@ -66,6 +72,7 @@ module OpenAI end class OutputTokensDetails < OpenAI::BaseModel + # The number of reasoning tokens. sig { returns(Integer) } def reasoning_tokens end @@ -74,6 +81,7 @@ module OpenAI def reasoning_tokens=(_) end + # A detailed breakdown of the output tokens. sig { params(reasoning_tokens: Integer).returns(T.attached_class) } def self.new(reasoning_tokens:) end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi index 16bde4c4..1348fded 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.completed`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is completed. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.completed") end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi index 654b7293..891725d6 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.in_progress`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is initiated. 
sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi index c0de9efa..a2f0a421 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.searching`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is executing. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.searching") end diff --git a/rbi/lib/openai/models/responses/tool.rbi b/rbi/lib/openai/models/responses/tool.rbi index 00e4ecf8..e1477b05 100644 --- a/rbi/lib/openai/models/responses/tool.rbi +++ b/rbi/lib/openai/models/responses/tool.rbi @@ -3,18 +3,21 @@ module OpenAI module Models module Responses + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). class Tool < OpenAI::Union abstract! - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::FileSearchTool], [Symbol, OpenAI::Models::Responses::FunctionTool], [Symbol, OpenAI::Models::Responses::ComputerTool], [NilClass, OpenAI::Models::Responses::WebSearchTool]] - ) - end - private def variants - end + Variants = type_template(:out) do + { + fixed: T.any( + OpenAI::Models::Responses::FileSearchTool, + OpenAI::Models::Responses::FunctionTool, + OpenAI::Models::Responses::ComputerTool, + OpenAI::Models::Responses::WebSearchTool + ) + } end end end diff --git a/rbi/lib/openai/models/responses/tool_choice_function.rbi b/rbi/lib/openai/models/responses/tool_choice_function.rbi index c11c91a4..a8afd2d4 100644 --- a/rbi/lib/openai/models/responses/tool_choice_function.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_function.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ToolChoiceFunction < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -12,6 +13,7 @@ module OpenAI def name=(_) end + # For function calling, the type is always `function`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Use this option to force the model to call a specific function. 
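# For example (a sketch; `get_weather` is a hypothetical function name):
#
#   tool_choice = OpenAI::Models::Responses::ToolChoiceFunction.new(name: "get_weather")
#   # `type` defaults to :function, so only the name is required here.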
sig { params(name: String, type: Symbol).returns(T.attached_class) } def self.new(name:, type: :function) end diff --git a/rbi/lib/openai/models/responses/tool_choice_options.rbi b/rbi/lib/openai/models/responses/tool_choice_options.rbi index 7e1f9984..c047abb7 100644 --- a/rbi/lib/openai/models/responses/tool_choice_options.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_options.rbi @@ -3,18 +3,22 @@ module OpenAI module Models module Responses + # Controls which (if any) tool is called by the model. + # + # `none` means the model will not call any tool and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling one or + # more tools. + # + # `required` means the model must call one or more tools. class ToolChoiceOptions < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + NONE = :none AUTO = :auto REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/tool_choice_types.rbi b/rbi/lib/openai/models/responses/tool_choice_types.rbi index 8176455f..8459293b 100644 --- a/rbi/lib/openai/models/responses/tool_choice_types.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_types.rbi @@ -4,6 +4,14 @@ module OpenAI module Models module Responses class ToolChoiceTypes < OpenAI::BaseModel + # The type of hosted tool the model should use. Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # + # Allowed values are: + # + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` sig { returns(Symbol) } def type end @@ -12,6 +20,8 @@ module OpenAI def type=(_) end + # Indicates that the model should use a built-in tool to generate a response. + # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). sig { params(type: Symbol).returns(T.attached_class) } def self.new(type:) end @@ -20,19 +30,23 @@ module OpenAI def to_hash end + # The type of hosted tool the model should use. Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # + # Allowed values are: + # + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + FILE_SEARCH = :file_search WEB_SEARCH_PREVIEW = :web_search_preview COMPUTER_USE_PREVIEW = :computer_use_preview WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/responses/web_search_tool.rbi b/rbi/lib/openai/models/responses/web_search_tool.rbi index be37fbf0..cf2fb2f6 100644 --- a/rbi/lib/openai/models/responses/web_search_tool.rbi +++ b/rbi/lib/openai/models/responses/web_search_tool.rbi @@ -4,6 +4,10 @@ module OpenAI module Models module Responses class WebSearchTool < OpenAI::BaseModel + # The type of the web search tool. One of: + # + # - `web_search_preview` + # - `web_search_preview_2025_03_11` sig { returns(Symbol) } def type end @@ -12,6 +16,8 @@ module OpenAI def type=(_) end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. sig { returns(T.nilable(Symbol)) } def search_context_size end @@ -31,6 +37,9 @@ module OpenAI def user_location=(_) end + # This tool searches the web for relevant results to use in a response.
Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). sig do params( type: Symbol, @@ -55,34 +64,33 @@ module OpenAI def to_hash end + # The type of the web search tool. One of: + # + # - `web_search_preview` + # - `web_search_preview_2025_03_11` class Type < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + WEB_SEARCH_PREVIEW = :web_search_preview WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. class SearchContextSize < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + LOW = :low MEDIUM = :medium HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class UserLocation < OpenAI::BaseModel + # The type of location approximation. Always `approximate`. sig { returns(Symbol) } def type end @@ -91,6 +99,7 @@ module OpenAI def type=(_) end + # Free text input for the city of the user, e.g. `San Francisco`. sig { returns(T.nilable(String)) } def city end @@ -99,6 +108,8 @@ module OpenAI def city=(_) end + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. sig { returns(T.nilable(String)) } def country end @@ -107,6 +118,7 @@ module OpenAI def country=(_) end + # Free text input for the region of the user, e.g. `California`. sig { returns(T.nilable(String)) } def region end @@ -115,6 +127,8 @@ module OpenAI def region=(_) end + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } def timezone end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy.rbi b/rbi/lib/openai/models/static_file_chunking_strategy.rbi index b076e093..1d8d219c 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy.rbi @@ -3,6 +3,9 @@ module OpenAI module Models class StaticFileChunkingStrategy < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -11,6 +14,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi index 94e5e78c..99645473 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi @@ -11,6 +11,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi index 3eed65cb..a7ed94e1 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi @@ -11,6 +11,7 @@ module OpenAI def static=(_) end + # Always `static`. 
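# A construction sketch (illustrative only; the inner
# StaticFileChunkingStrategy constructor keywords mirror the attribute
# names documented above and are an assumption of this sketch):
#
#   OpenAI::Models::StaticFileChunkingStrategyObjectParam.new(
#     static: OpenAI::Models::StaticFileChunkingStrategy.new(
#       max_chunk_size_tokens: 800,
#       chunk_overlap_tokens: 400 # must not exceed half of max_chunk_size_tokens
#     )
#   )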
sig { returns(Symbol) } def type end @@ -19,6 +20,7 @@ module OpenAI def type=(_) end + # Customize your own chunking strategy by setting chunk size and chunk overlap. sig { params(static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol).returns(T.attached_class) } def self.new(static:, type: :static) end diff --git a/rbi/lib/openai/models/upload.rbi b/rbi/lib/openai/models/upload.rbi index 422b80e9..1da23f04 100644 --- a/rbi/lib/openai/models/upload.rbi +++ b/rbi/lib/openai/models/upload.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Upload < OpenAI::BaseModel + # The Upload unique identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The intended number of bytes to be uploaded. sig { returns(Integer) } def bytes end @@ -19,6 +21,7 @@ module OpenAI def bytes=(_) end + # The Unix timestamp (in seconds) for when the Upload was created. sig { returns(Integer) } def created_at end @@ -27,6 +30,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the Upload will expire. sig { returns(Integer) } def expires_at end @@ -35,6 +39,7 @@ module OpenAI def expires_at=(_) end + # The name of the file to be uploaded. sig { returns(String) } def filename end @@ -43,6 +48,7 @@ module OpenAI def filename=(_) end + # The object type, which is always "upload". sig { returns(Symbol) } def object end @@ -51,6 +57,7 @@ module OpenAI def object=(_) end + # The intended purpose of the file. + # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. sig { returns(String) } def purpose end @@ -59,6 +68,7 @@ module OpenAI def purpose=(_) end + # The status of the Upload. sig { returns(Symbol) } def status end @@ -67,6 +77,7 @@ module OpenAI def status=(_) end + # The `File` object represents a document that has been uploaded to OpenAI. sig { returns(T.nilable(OpenAI::Models::FileObject)) } def file end @@ -75,6 +86,7 @@ module OpenAI def file=(_) end + # The Upload object can accept byte chunks in the form of Parts. sig do params( id: String, @@ -111,19 +123,16 @@ module OpenAI def to_hash end + # The status of the Upload. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + PENDING = :pending COMPLETED = :completed CANCELLED = :cancelled EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/upload_complete_params.rbi b/rbi/lib/openai/models/upload_complete_params.rbi index 840707e6..f8550617 100644 --- a/rbi/lib/openai/models/upload_complete_params.rbi +++ b/rbi/lib/openai/models/upload_complete_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ordered list of Part IDs. sig { returns(T::Array[String]) } def part_ids end @@ -14,6 +15,8 @@ module OpenAI def part_ids=(_) end + # The optional md5 checksum for the file contents to verify if the bytes uploaded + # match what you expect.
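# For example (a sketch; the part IDs and filename are placeholders, and
# computing the checksum this way is an assumption, not an SDK requirement):
#
#   require "digest"
#
#   md5 = Digest::MD5.hexdigest(File.binread("data.jsonl"))
#   part_ids = ["part_abc", "part_def"]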
sig { returns(T.nilable(String)) } def md5 end diff --git a/rbi/lib/openai/models/upload_create_params.rbi b/rbi/lib/openai/models/upload_create_params.rbi index 6d144a54..22555f0e 100644 --- a/rbi/lib/openai/models/upload_create_params.rbi +++ b/rbi/lib/openai/models/upload_create_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The number of bytes in the file you are uploading. sig { returns(Integer) } def bytes end @@ -14,6 +15,7 @@ module OpenAI def bytes=(_) end + # The name of the file to upload. sig { returns(String) } def filename end @@ -22,6 +24,10 @@ module OpenAI def filename=(_) end + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. sig { returns(String) } def mime_type end @@ -30,6 +36,10 @@ module OpenAI def mime_type=(_) end + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). sig { returns(Symbol) } def purpose end diff --git a/rbi/lib/openai/models/uploads/part_create_params.rbi b/rbi/lib/openai/models/uploads/part_create_params.rbi index 4e2fe8c7..7ef6052a 100644 --- a/rbi/lib/openai/models/uploads/part_create_params.rbi +++ b/rbi/lib/openai/models/uploads/part_create_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The chunk of bytes for this Part. sig { returns(T.any(IO, StringIO)) } def data end diff --git a/rbi/lib/openai/models/uploads/upload_part.rbi b/rbi/lib/openai/models/uploads/upload_part.rbi index 6801e859..ae805178 100644 --- a/rbi/lib/openai/models/uploads/upload_part.rbi +++ b/rbi/lib/openai/models/uploads/upload_part.rbi @@ -6,6 +6,7 @@ module OpenAI module Uploads class UploadPart < OpenAI::BaseModel + # The upload Part unique identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the Part was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,7 @@ module OpenAI def created_at=(_) end + # The object type, which is always `upload.part`. sig { returns(Symbol) } def object end @@ -30,6 +33,7 @@ module OpenAI def object=(_) end + # The ID of the Upload object that this Part was added to. sig { returns(String) } def upload_id end @@ -38,6 +42,7 @@ module OpenAI def upload_id=(_) end + # The upload Part represents a chunk of bytes we can add to an Upload object. sig do params(id: String, created_at: Integer, upload_id: String, object: Symbol).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/vector_store.rbi b/rbi/lib/openai/models/vector_store.rbi index e60530c4..642c27bf 100644 --- a/rbi/lib/openai/models/vector_store.rbi +++ b/rbi/lib/openai/models/vector_store.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class VectorStore < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store was created. sig { returns(Integer) } def created_at end @@ -27,6 +29,7 @@ module OpenAI def file_counts=(_) end + # The Unix timestamp (in seconds) for when the vector store was last active. 
sig { returns(T.nilable(Integer)) } def last_active_at end @@ -35,6 +38,12 @@ module OpenAI def last_active_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -43,6 +52,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. sig { returns(String) } def name end @@ -51,6 +61,7 @@ module OpenAI def name=(_) end + # The object type, which is always `vector_store`. sig { returns(Symbol) } def object end @@ -59,6 +70,9 @@ module OpenAI def object=(_) end + # The status of the vector store, which can be either `expired`, `in_progress`, or + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. sig { returns(Symbol) } def status end @@ -67,6 +81,7 @@ module OpenAI def status=(_) end + # The total number of bytes used by the files in the vector store. sig { returns(Integer) } def usage_bytes end @@ -75,6 +90,7 @@ module OpenAI def usage_bytes=(_) end + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStore::ExpiresAfter)) } def expires_after end @@ -83,6 +99,7 @@ module OpenAI def expires_after=(_) end + # The Unix timestamp (in seconds) for when the vector store will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -91,6 +108,8 @@ module OpenAI def expires_at=(_) end + # A vector store is a collection of processed files that can be used by the + # `file_search` tool. sig do params( id: String, @@ -144,6 +163,7 @@ module OpenAI end class FileCounts < OpenAI::BaseModel + # The number of files that were cancelled. sig { returns(Integer) } def cancelled end @@ -152,6 +172,7 @@ module OpenAI def cancelled=(_) end + # The number of files that have been successfully processed. sig { returns(Integer) } def completed end @@ -160,6 +181,7 @@ module OpenAI def completed=(_) end + # The number of files that have failed to process. sig { returns(Integer) } def failed end @@ -168,6 +190,7 @@ module OpenAI def failed=(_) end + # The number of files that are currently being processed. sig { returns(Integer) } def in_progress end @@ -176,6 +199,7 @@ module OpenAI def in_progress=(_) end + # The total number of files. sig { returns(Integer) } def total end @@ -211,21 +235,22 @@ module OpenAI end end + # The status of the vector store, which can be either `expired`, `in_progress`, or + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + EXPIRED = :expired IN_PROGRESS = :in_progress COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -234,6 +259,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -242,6 +268,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store.
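# For example (a sketch):
#
#   OpenAI::Models::VectorStore::ExpiresAfter.new(days: 7)
#   # `anchor` defaults to :last_active_at, the only supported anchor.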
sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_store_create_params.rbi b/rbi/lib/openai/models/vector_store_create_params.rbi index 4444751d..02ffb8d9 100644 --- a/rbi/lib/openai/models/vector_store_create_params.rbi +++ b/rbi/lib/openai/models/vector_store_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -36,6 +38,7 @@ module OpenAI def chunking_strategy=(_) end + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStoreCreateParams::ExpiresAfter)) } def expires_after end @@ -47,6 +50,9 @@ module OpenAI def expires_after=(_) end + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -55,6 +61,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -63,6 +75,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. sig { returns(T.nilable(String)) } def name end @@ -108,6 +121,8 @@ module OpenAI end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -116,6 +131,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -124,6 +140,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store. sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_store_list_params.rbi b/rbi/lib/openai/models/vector_store_list_params.rbi index a0c37415..5749ce8a 100644 --- a/rbi/lib/openai/models/vector_store_list_params.rbi +++ b/rbi/lib/openai/models/vector_store_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. 
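# A paging sketch (illustrative only; `client` and the `vector_stores.list`
# call path are assumptions about the SDK surface, not part of this diff):
#
#   page = client.vector_stores.list(limit: 20, order: :desc)
#   older = client.vector_stores.list(after: page.data.last.id)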
sig { returns(T.nilable(String)) } def before end @@ -22,6 +30,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -30,6 +40,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -66,17 +78,15 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/vector_store_search_params.rbi b/rbi/lib/openai/models/vector_store_search_params.rbi index 52ea39e4..91a2ec22 100644 --- a/rbi/lib/openai/models/vector_store_search_params.rbi +++ b/rbi/lib/openai/models/vector_store_search_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A query string for a search sig { returns(T.any(String, T::Array[String])) } def query end @@ -14,6 +15,7 @@ module OpenAI def query=(_) end + # A filter to apply based on file attributes. sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } def filters end @@ -25,6 +27,8 @@ module OpenAI def filters=(_) end + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -33,6 +37,7 @@ module OpenAI def max_num_results=(_) end + # Ranking options for search. sig { returns(T.nilable(OpenAI::Models::VectorStoreSearchParams::RankingOptions)) } def ranking_options end @@ -44,6 +49,7 @@ module OpenAI def ranking_options=(_) end + # Whether to rewrite the natural language query for vector search. sig { returns(T.nilable(T::Boolean)) } def rewrite_query end @@ -82,29 +88,20 @@ module OpenAI def to_hash end + # A query string for a search class Query < OpenAI::Union abstract! - StringArray = T.type_alias { T::Array[String] } + Variants = type_template(:out) { {fixed: T.any(String, T::Array[String])} } - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end + StringArray = T.type_alias { T::Array[String] } end + # A filter to apply based on file attributes. class Filters < OpenAI::Union abstract! - class << self - sig do - override - .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) - end - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)} } end class RankingOptions < OpenAI::BaseModel @@ -124,6 +121,7 @@ module OpenAI def score_threshold=(_) end + # Ranking options for search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker: nil, score_threshold: nil) end @@ -135,14 +133,10 @@ module OpenAI class Ranker < OpenAI::Enum abstract! 
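# A construction sketch for the enclosing RankingOptions (illustrative
# only; :auto is one of the ranker values defined below):
#
#   OpenAI::Models::VectorStoreSearchParams::RankingOptions.new(
#     ranker: :auto,
#     score_threshold: 0.5
#   )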
+ Value = type_template(:out) { {fixed: Symbol} } + AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/vector_store_search_response.rbi b/rbi/lib/openai/models/vector_store_search_response.rbi index d8ffa5fa..a2443437 100644 --- a/rbi/lib/openai/models/vector_store_search_response.rbi +++ b/rbi/lib/openai/models/vector_store_search_response.rbi @@ -3,6 +3,11 @@ module OpenAI module Models class VectorStoreSearchResponse < OpenAI::BaseModel + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -14,6 +19,7 @@ module OpenAI def attributes=(_) end + # Content chunks from the file. sig { returns(T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) } def content end @@ -25,6 +31,7 @@ module OpenAI def content=(_) end + # The ID of the vector store file. sig { returns(String) } def file_id end @@ -33,6 +40,7 @@ module OpenAI def file_id=(_) end + # The name of the vector store file. sig { returns(String) } def filename end @@ -41,6 +49,7 @@ module OpenAI def filename=(_) end + # The similarity score for the result. sig { returns(Float) } def score end @@ -80,14 +89,11 @@ module OpenAI class Attribute < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end class Content < OpenAI::BaseModel + # The text content returned from search. sig { returns(String) } def text end @@ -96,6 +102,7 @@ module OpenAI def text=(_) end + # The type of content. sig { returns(Symbol) } def type end @@ -112,16 +119,13 @@ module OpenAI def to_hash end + # The type of content. class Type < OpenAI::Enum abstract! - TEXT = :text + Value = type_template(:out) { {fixed: Symbol} } - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end + TEXT = :text end end end diff --git a/rbi/lib/openai/models/vector_store_update_params.rbi b/rbi/lib/openai/models/vector_store_update_params.rbi index 609a1ea7..88614cee 100644 --- a/rbi/lib/openai/models/vector_store_update_params.rbi +++ b/rbi/lib/openai/models/vector_store_update_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter)) } def expires_after end @@ -17,6 +18,12 @@ module OpenAI def expires_after=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -25,6 +32,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. 
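A hypothetical sketch of the search surface typed above, reusing the `client` from the earlier sketch (the `vector_stores.search` resource method and the shape of its return value are assumptions based on the params and response models in this diff):

# Sketch only; IDs are placeholders.
results = client.vector_stores.search(
  "vs_abc123",
  query: "How do I reset my password?",
  max_num_results: 10,  # must be between 1 and 50 inclusive
  rewrite_query: true   # rewrite the natural language query for vector search
)
results.each do |hit|
  puts format("%s (score=%.3f)", hit.filename, hit.score)
  hit.content.each { |chunk| puts chunk.text }
end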
sig { returns(T.nilable(String)) } def name end @@ -60,6 +68,8 @@ module OpenAI end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -68,6 +78,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -76,6 +87,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store. sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi index 50d4260c..4be0dcb6 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T::Array[String]) } def file_ids end @@ -15,6 +18,11 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -26,6 +34,8 @@ module OpenAI def attributes=(_) end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -91,11 +101,7 @@ module OpenAI class Attribute < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi index 6847460c..c7da2f41 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi @@ -15,6 +15,10 @@ module OpenAI def vector_store_id=(_) end + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -23,6 +27,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. 
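The `after`/`before` cursors documented above drive pagination. A hedged sketch of walking pages manually (the `vector_stores.list` resource method and a `data` accessor on the returned page are assumed):

# Sketch: the cursor for the next page is the ID of the last object seen.
params = {limit: 20, order: :desc}
loop do
  page = client.vector_stores.list(**params)
  items = page.data || []
  items.each { |vs| puts vs.id }
  break if items.empty?
  params[:after] = items.last.id
end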
sig { returns(T.nilable(String)) } def before end @@ -31,6 +39,7 @@ module OpenAI def before=(_) end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. sig { returns(T.nilable(Symbol)) } def filter end @@ -39,6 +48,8 @@ module OpenAI def filter=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -47,6 +58,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -87,32 +100,27 @@ module OpenAI def to_hash end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. class Filter < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed FAILED = :failed CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/vector_stores/file_content_response.rbi b/rbi/lib/openai/models/vector_stores/file_content_response.rbi index da110839..7986b030 100644 --- a/rbi/lib/openai/models/vector_stores/file_content_response.rbi +++ b/rbi/lib/openai/models/vector_stores/file_content_response.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module VectorStores class FileContentResponse < OpenAI::BaseModel + # The text content sig { returns(T.nilable(String)) } def text end @@ -12,6 +13,7 @@ module OpenAI def text=(_) end + # The content type (currently only `"text"`) sig { returns(T.nilable(String)) } def type end diff --git a/rbi/lib/openai/models/vector_stores/file_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_create_params.rbi index 9c1d277d..57e35c52 100644 --- a/rbi/lib/openai/models/vector_stores/file_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_create_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(String) } def file_id end @@ -15,6 +18,11 @@ module OpenAI def file_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -26,6 +34,8 @@ module OpenAI def attributes=(_) end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -91,11 +101,7 @@ module OpenAI class Attribute < OpenAI::Union abstract! 
- class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/vector_stores/file_list_params.rbi b/rbi/lib/openai/models/vector_stores/file_list_params.rbi index 42032f29..0f62a65d 100644 --- a/rbi/lib/openai/models/vector_stores/file_list_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_list_params.rbi @@ -7,6 +7,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -15,6 +19,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -23,6 +31,7 @@ module OpenAI def before=(_) end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. sig { returns(T.nilable(Symbol)) } def filter end @@ -31,6 +40,8 @@ module OpenAI def filter=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -39,6 +50,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -77,32 +90,27 @@ module OpenAI def to_hash end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. class Filter < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed FAILED = :failed CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + ASC = :asc DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/models/vector_stores/file_update_params.rbi b/rbi/lib/openai/models/vector_stores/file_update_params.rbi index 865c29c6..e3693815 100644 --- a/rbi/lib/openai/models/vector_stores/file_update_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_update_params.rbi @@ -15,6 +15,11 @@ module OpenAI def vector_store_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. 
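Since attribute values are typed as the `String`/`Float`/`Boolean` union above, an update sketch (resource path and IDs are hypothetical):

# Attribute values mirror the Attribute union typed above.
client.vector_stores.files.update(
  "file-abc123",
  vector_store_id: "vs_abc123",
  attributes: {
    author: "Jane Doe", # String
    version: 2.0,       # Float
    archived: false     # Boolean
  }
)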
sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -53,11 +58,7 @@ module OpenAI class Attribute < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi index 102bd1c5..0d906238 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi @@ -6,6 +6,7 @@ module OpenAI module VectorStores class VectorStoreFile < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store file was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,8 @@ module OpenAI def created_at=(_) end + # The last error associated with this vector store file. Will be `null` if there + # are no errors. sig { returns(T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) } def last_error end @@ -33,6 +37,7 @@ module OpenAI def last_error=(_) end + # The object type, which is always `vector_store.file`. sig { returns(Symbol) } def object end @@ -41,6 +46,9 @@ module OpenAI def object=(_) end + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. sig { returns(Symbol) } def status end @@ -49,6 +57,8 @@ module OpenAI def status=(_) end + # The total vector store usage in bytes. Note that this may be different from the + # original file size. sig { returns(Integer) } def usage_bytes end @@ -57,6 +67,10 @@ module OpenAI def usage_bytes=(_) end + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } def vector_store_id end @@ -65,6 +79,11 @@ module OpenAI def vector_store_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -76,6 +95,7 @@ module OpenAI def attributes=(_) end + # The strategy used to chunk the file. sig do returns( T.nilable( @@ -97,6 +117,7 @@ module OpenAI def chunking_strategy=(_) end + # A list of files attached to a vector store. sig do params( id: String, @@ -144,6 +165,7 @@ module OpenAI end class LastError < OpenAI::BaseModel + # One of `server_error` or `rate_limit_exceeded`. sig { returns(Symbol) } def code end @@ -152,6 +174,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -160,6 +183,8 @@ module OpenAI def message=(_) end + # The last error associated with this vector store file. Will be `null` if there + # are no errors. 
sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -168,44 +193,36 @@ module OpenAI def to_hash end + # One of `server_error` or `rate_limit_exceeded`. class Code < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + SERVER_ERROR = :server_error UNSUPPORTED_FILE = :unsupported_file INVALID_FILE = :invalid_file - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed CANCELLED = :cancelled FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end class Attribute < OpenAI::Union abstract! - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end + Variants = type_template(:out) { {fixed: T.any(String, Float, T::Boolean)} } end end end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi index 1fa22e5f..3ee9a72d 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi @@ -6,6 +6,7 @@ module OpenAI module VectorStores class VectorStoreFileBatch < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store files batch was + # created. sig { returns(Integer) } def created_at end @@ -33,6 +36,7 @@ module OpenAI def file_counts=(_) end + # The object type, which is always `vector_store.file_batch`. sig { returns(Symbol) } def object end @@ -41,6 +45,8 @@ module OpenAI def object=(_) end + # The status of the vector store files batch, which can be either `in_progress`, + # `completed`, `cancelled` or `failed`. sig { returns(Symbol) } def status end @@ -49,6 +55,10 @@ module OpenAI def status=(_) end + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } def vector_store_id end @@ -57,6 +67,7 @@ module OpenAI def vector_store_id=(_) end + # A batch of files attached to a vector store. sig do params( id: String, @@ -88,6 +99,7 @@ module OpenAI end class FileCounts < OpenAI::BaseModel + # The number of files that were cancelled. sig { returns(Integer) } def cancelled end @@ -96,6 +108,7 @@ module OpenAI def cancelled=(_) end + # The number of files that have been processed. sig { returns(Integer) } def completed end @@ -104,6 +117,7 @@ module OpenAI def completed=(_) end + # The number of files that have failed to process. sig { returns(Integer) } def failed end @@ -112,6 +126,7 @@ module OpenAI def failed=(_) end + # The number of files that are currently being processed. sig { returns(Integer) } def in_progress end @@ -120,6 +135,7 @@ module OpenAI def in_progress=(_) end + # The total number of files.
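A hedged polling sketch built on the `status` enum and `file_counts` fields typed above (the `file_batches.retrieve` resource method and the IDs are assumptions):

# Poll until the batch reaches a terminal status, then report file counts.
batch = client.vector_stores.file_batches.retrieve("vsfb_abc123", vector_store_id: "vs_abc123")
until %i[completed failed cancelled].include?(batch.status)
  sleep(1)
  batch = client.vector_stores.file_batches.retrieve("vsfb_abc123", vector_store_id: "vs_abc123")
end
counts = batch.file_counts
puts "#{counts.completed}/#{counts.total} processed, #{counts.failed} failed"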
sig { returns(Integer) } def total end @@ -155,19 +171,17 @@ module OpenAI end end + # The status of the vector store files batch, which can be either `in_progress`, + # `completed`, `cancelled` or `failed`. class Status < OpenAI::Enum abstract! + Value = type_template(:out) { {fixed: Symbol} } + IN_PROGRESS = :in_progress COMPLETED = :completed CANCELLED = :cancelled FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end end end end diff --git a/rbi/lib/openai/page.rbi b/rbi/lib/openai/page.rbi index 9f33956d..a738bab8 100644 --- a/rbi/lib/openai/page.rbi +++ b/rbi/lib/openai/page.rbi @@ -6,11 +6,11 @@ module OpenAI Elem = type_member - sig { returns(T::Array[Elem]) } + sig { returns(T.nilable(T::Array[Elem])) } def data end - sig { params(_: T::Array[Elem]).returns(T::Array[Elem]) } + sig { params(_: T.nilable(T::Array[Elem])).returns(T.nilable(T::Array[Elem])) } def data=(_) end @@ -21,17 +21,5 @@ module OpenAI sig { params(_: String).returns(String) } def object=(_) end - - sig do - params( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::RequestComponentsShape, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - page_data: T::Array[T.anything] - ) - .returns(T.attached_class) - end - def self.new(client:, req:, headers:, page_data:) - end end end diff --git a/rbi/lib/openai/pooled_net_requester.rbi b/rbi/lib/openai/pooled_net_requester.rbi index 9d80cd5f..2cdf7b6b 100644 --- a/rbi/lib/openai/pooled_net_requester.rbi +++ b/rbi/lib/openai/pooled_net_requester.rbi @@ -1,20 +1,24 @@ # typed: strong module OpenAI + # @api private class PooledNetRequester RequestShape = T.type_alias do {method: Symbol, url: URI::Generic, headers: T::Hash[String, String], body: T.anything, deadline: Float} end class << self + # @api private sig { params(url: URI::Generic).returns(Net::HTTP) } def connect(url) end + # @api private sig { params(conn: Net::HTTP, deadline: Float).void } def calibrate_socket_timeout(conn, deadline) end + # @api private sig do params(request: OpenAI::PooledNetRequester::RequestShape, blk: T.proc.params(arg0: String).void) .returns(Net::HTTPGenericRequest) @@ -23,17 +27,20 @@ module OpenAI end end - sig { params(url: URI::Generic, blk: T.proc.params(arg0: Net::HTTP).void).void } - private def with_pool(url, &blk) + # @api private + sig { params(url: URI::Generic, deadline: Float, blk: T.proc.params(arg0: Net::HTTP).void).void } + private def with_pool(url, deadline:, &blk) end + # @api private sig do params(request: OpenAI::PooledNetRequester::RequestShape) - .returns([Net::HTTPResponse, T::Enumerable[String]]) + .returns([Integer, Net::HTTPResponse, T::Enumerable[String]]) end def execute(request) end + # @api private sig { params(size: Integer).returns(T.attached_class) } def self.new(size: Etc.nprocessors) end diff --git a/rbi/lib/openai/request_options.rbi b/rbi/lib/openai/request_options.rbi index f55d6e56..ded742c2 100644 --- a/rbi/lib/openai/request_options.rbi +++ b/rbi/lib/openai/request_options.rbi @@ -1,9 +1,9 @@ # typed: strong module OpenAI + # @api private module RequestParameters - abstract! - + # Options to specify HTTP behaviour for this request. 
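Note that `Page#data` is now typed as nilable in the `page.rbi` change above, so callers should guard against `nil`. A defensive iteration sketch (the endpoint used here is illustrative, not part of this diff):

# `data` may be nil per the updated signature; Array() normalizes that to [].
page = client.models.list
Array(page.data).each { |model| puts model.id }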
sig { returns(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) } def request_options end @@ -15,18 +15,28 @@ module OpenAI def request_options=(_) end + # @api private module Converter + # @api private sig { params(params: T.anything).returns([T.anything, T::Hash[Symbol, T.anything]]) } def dump_request(params) end end end + # Specify HTTP behaviour to use for a specific request. These options supplement + # or override those provided at the client level. + # + # When making a request, you can pass an actual {RequestOptions} instance, or + # simply pass a Hash with symbol keys matching the attributes on this class. class RequestOptions < OpenAI::BaseModel + # @api private sig { params(opts: T.any(T.self_type, T::Hash[Symbol, T.anything])).void } def self.validate!(opts) end + # Idempotency key to send with request and all associated retries. Will only be + # sent for write requests. sig { returns(T.nilable(String)) } def idempotency_key end @@ -35,6 +45,8 @@ module OpenAI def idempotency_key=(_) end + # Extra query params to send with the request. These are `.merge`’d into any + # `query` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) } def extra_query end @@ -46,6 +58,8 @@ module OpenAI def extra_query=(_) end + # Extra headers to send with the request. These are `.merge`’d into any + # `extra_headers` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(String)])) } def extra_headers end @@ -57,6 +71,8 @@ module OpenAI def extra_headers=(_) end + # Extra data to send with the request. These are deep merged into any data + # generated as part of the normal request. sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } def extra_body end @@ -65,6 +81,7 @@ module OpenAI def extra_body=(_) end + # Maximum number of retries to attempt after a failed initial request. sig { returns(T.nilable(Integer)) } def max_retries end @@ -73,6 +90,7 @@ module OpenAI def max_retries=(_) end + # Request timeout in seconds. sig { returns(T.nilable(Float)) } def timeout end @@ -81,6 +99,7 @@ module OpenAI def timeout=(_) end + # Returns a new instance of RequestOptions. sig { params(values: T::Hash[Symbol, T.anything]).returns(T.attached_class) } def self.new(values = {}) end diff --git a/rbi/lib/openai/resources/audio/speech.rbi b/rbi/lib/openai/resources/audio/speech.rbi index 5ad154db..ae6f4be5 100644 --- a/rbi/lib/openai/resources/audio/speech.rbi +++ b/rbi/lib/openai/resources/audio/speech.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Speech + # Generates audio from the input text. sig do params( input: String, @@ -15,7 +16,25 @@ module OpenAI ) .returns(T.anything) end - def create(input:, model:, voice:, response_format: nil, speed: nil, request_options: {}) + def create( + # The text to generate audio for. The maximum length is 4096 characters. + input:, + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` + model:, + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + voice:, + # The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. + response_format: nil, + # The speed of the generated audio. Select a value from `0.25` to `4.0`.
`1.0` is + # the default. + speed: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/audio/transcriptions.rbi b/rbi/lib/openai/resources/audio/transcriptions.rbi index a88bec77..50e5c416 100644 --- a/rbi/lib/openai/resources/audio/transcriptions.rbi +++ b/rbi/lib/openai/resources/audio/transcriptions.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Transcriptions + # Transcribes audio into the input language. sig do params( file: T.any(IO, StringIO), @@ -18,12 +19,35 @@ module OpenAI .returns(T.any(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)) end def create( + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. file:, + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. model:, + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. language: nil, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. temperature: nil, + # The timestamp granularities to populate for this transcription. + # `response_format` must be set to `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. timestamp_granularities: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/audio/translations.rbi b/rbi/lib/openai/resources/audio/translations.rbi index 2b323900..278e3855 100644 --- a/rbi/lib/openai/resources/audio/translations.rbi +++ b/rbi/lib/openai/resources/audio/translations.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Translations + # Translates audio into English. sig do params( file: T.any(IO, StringIO), @@ -15,7 +16,29 @@ module OpenAI ) .returns(T.any(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)) end - def create(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) + def create( + # The audio file object (not file name) to translate, in one of these formats: flac, + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + file:, + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. + model:, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English.
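Tying the audio endpoints above to the `RequestOptions` attributes typed earlier, a hedged usage sketch (the `client.audio.*` access path is assumed rather than shown in this diff; the file name is a placeholder):

# Transcription sketch; `timestamp_granularities` requires
# `response_format: :verbose_json`, per the docs above.
transcription = client.audio.transcriptions.create(
  file: File.open("meeting.mp3", "rb"),
  model: "whisper-1",
  language: "en",                       # ISO-639-1 hint improves accuracy and latency
  response_format: :verbose_json,
  timestamp_granularities: [:segment],
  request_options: {timeout: 120, max_retries: 2} # per-request options, as typed earlier
)
puts transcription.text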
+ prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. + response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. + temperature: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/batches.rbi b/rbi/lib/openai/resources/batches.rbi index ec305bd4..a3f56e28 100644 --- a/rbi/lib/openai/resources/batches.rbi +++ b/rbi/lib/openai/resources/batches.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Batches + # Creates and executes a batch from an uploaded file of requests sig do params( completion_window: Symbol, @@ -13,9 +14,37 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}) + def create( + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. + completion_window:, + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. + endpoint:, + # The ID of an uploaded file that contains requests for the new batch. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. + input_file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end + # Retrieves a batch. sig do params( batch_id: String, @@ -23,9 +52,14 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def retrieve(batch_id, request_options: {}) + def retrieve( + # The ID of the batch to retrieve. + batch_id, + request_options: {} + ) end + # List your organization's batches. sig do params( after: String, @@ -34,9 +68,22 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Batch]) end - def list(after: nil, limit: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + request_options: {} + ) end + # Cancels an in-progress batch. 
The batch will be in status `cancelling` for up to + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file. sig do params( batch_id: String, @@ -44,7 +91,11 @@ module OpenAI ) .returns(OpenAI::Models::Batch) end - def cancel(batch_id, request_options: {}) + def cancel( + # The ID of the batch to cancel. + batch_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/assistants.rbi b/rbi/lib/openai/resources/beta/assistants.rbi index 72828f08..c9d62665 100644 --- a/rbi/lib/openai/resources/beta/assistants.rbi +++ b/rbi/lib/openai/resources/beta/assistants.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Beta class Assistants + # Create an assistant with a model and instructions. sig do params( model: T.any(String, Symbol), @@ -35,21 +36,78 @@ module OpenAI .returns(OpenAI::Models::Beta::Assistant) end def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, + # The description of the assistant. The maximum length is 512 characters. description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The name of the assistant. The maximum length is 256 characters. name: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ) end + # Retrieves an assistant. sig do params( assistant_id: String, @@ -57,9 +115,14 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Assistant) end - def retrieve(assistant_id, request_options: {}) + def retrieve( + # The ID of the assistant to retrieve. + assistant_id, + request_options: {} + ) end + # Modifies an assistant. sig do params( assistant_id: String, @@ -92,22 +155,80 @@ module OpenAI .returns(OpenAI::Models::Beta::Assistant) end def update( + # The ID of the assistant to modify. assistant_id, + # The description of the assistant. The maximum length is 512 characters. description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model: nil, + # The name of the assistant. The maximum length is 256 characters. name: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON.
+ # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, request_options: {} ) end + # Returns a list of assistants. sig do params( after: String, @@ -118,9 +239,28 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Assistant]) end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end + # Delete an assistant. sig do params( assistant_id: String, @@ -128,7 +268,11 @@ module OpenAI ) .returns(OpenAI::Models::Beta::AssistantDeleted) end - def delete(assistant_id, request_options: {}) + def delete( + # The ID of the assistant to delete. + assistant_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi index 02bc8060..2170549d 100644 --- a/rbi/lib/openai/resources/beta/threads.rbi +++ b/rbi/lib/openai/resources/beta/threads.rbi @@ -12,6 +12,7 @@ module OpenAI def messages end + # Create a thread.
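A hedged sketch of the assistant CRUD surface typed above (`client.beta.assistants` is the assumed access path; the model name and metadata are illustrative):

# Create, update, list, and delete assistants using the params documented above.
assistant = client.beta.assistants.create(
  model: "gpt-4o",
  name: "Math Tutor",
  instructions: "You are a personal math tutor.",
  temperature: 0.2 # adjust this or top_p, not both
)

client.beta.assistants.update(assistant.id, metadata: {team: "education"})

page = client.beta.assistants.list(limit: 20, order: :desc)
Array(page.data).each { |a| puts a.name }

client.beta.assistants.delete(assistant.id)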
sig do params( messages: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], @@ -21,9 +22,27 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def create(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) + def create( + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + messages: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) end + # Retrieves a thread. sig do params( thread_id: String, @@ -31,9 +50,14 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def retrieve(thread_id, request_options: {}) + def retrieve( + # The ID of the thread to retrieve. + thread_id, + request_options: {} + ) end + # Modifies a thread. sig do params( thread_id: String, @@ -43,9 +67,26 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Thread) end - def update(thread_id, metadata: nil, tool_resources: nil, request_options: {}) + def update( + # The ID of the thread to modify. Only the `metadata` can be modified. + thread_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) end + # Delete a thread. sig do params( thread_id: String, @@ -53,9 +94,14 @@ module OpenAI ) .returns(OpenAI::Models::Beta::ThreadDeleted) end - def delete(thread_id, request_options: {}) + def delete( + # The ID of the thread to delete. + thread_id, + request_options: {} + ) end + # Create a thread and run it in one request. sig do params( assistant_id: String, @@ -93,25 +139,99 @@ module OpenAI .returns(OpenAI::Models::Beta::Threads::Run) end def create_and_run( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. 
max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. 
For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. truncation_strategy: nil, request_options: {} ) end + # Create a thread and run it in one request. sig do params( assistant_id: String, @@ -178,20 +298,93 @@ module OpenAI ) end def create_and_run_streaming( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. truncation_strategy: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/beta/threads/messages.rbi b/rbi/lib/openai/resources/beta/threads/messages.rbi index 28c889d1..d417b9d0 100644 --- a/rbi/lib/openai/resources/beta/threads/messages.rbi +++ b/rbi/lib/openai/resources/beta/threads/messages.rbi @@ -5,6 +5,7 @@ module OpenAI class Beta class Threads class Messages + # Create a message. sig do params( thread_id: String, @@ -25,9 +26,33 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {}) + def create( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to create a message for. + thread_id, + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant.
Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end + # Retrieve a message. sig do params( message_id: String, @@ -36,9 +61,17 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def retrieve(message_id, thread_id:, request_options: {}) + def retrieve( + # The ID of the message to retrieve. + message_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to which this message belongs. + thread_id:, + request_options: {} + ) end + # Modifies a message. sig do params( message_id: String, @@ -48,9 +81,23 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Message) end - def update(message_id, thread_id:, metadata: nil, request_options: {}) + def update( + # Path param: The ID of the message to modify. + message_id, + # Path param: The ID of the thread to which this message belongs. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end + # Returns a list of messages for a given thread. sig do params( thread_id: String, @@ -64,16 +111,32 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Message]) end def list( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # the messages belong to. thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. order: nil, + # Filter messages by the run ID that generated them. run_id: nil, request_options: {} ) end + # Deletes a message. sig do params( message_id: String, @@ -82,7 +145,13 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::MessageDeleted) end - def delete(message_id, thread_id:, request_options: {}) + def delete( + # The ID of the message to delete. + message_id, + # The ID of the thread to which this message belongs. 
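Since `after` and `before` are object-ID cursors rather than numeric offsets, a short sketch of paging through a thread's messages may be useful here. Whether `OpenAI::CursorPage` auto-advances during `each` or requires the cursor to be threaded manually is not shown in this diff, so treat the second call as illustrative:

```ruby
page = client.beta.threads.messages.list("thread_abc123", limit: 20, order: :asc)

last_id = nil
page.each do |message|
  puts "#{message.role}: #{message.id}"
  last_id = message.id
end

# Fetch the next page by passing the last seen object ID as the `after` cursor.
client.beta.threads.messages.list("thread_abc123", limit: 20, after: last_id)
```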
+ thread_id:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi index 7f5df3f4..c2a318dd 100644 --- a/rbi/lib/openai/resources/beta/threads/runs.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs.rbi @@ -9,6 +9,7 @@ module OpenAI def steps end + # Create a run. sig do params( thread_id: String, @@ -49,28 +50,119 @@ module OpenAI .returns(OpenAI::Models::Beta::Threads::Run) end def create( + # Path param: The ID of the thread to run. thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. 
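Nearly every keyword on `create` is a per-run override layered on top of the assistant's stored configuration, which a sketch makes concrete (IDs and values are placeholders, and the accessor chain is assumed from the module path):

```ruby
run = client.beta.threads.runs.create(
  "thread_abc123",
  assistant_id: "asst_abc123",
  # Appended after the assistant's instructions rather than replacing them:
  additional_instructions: "Keep the answer under one paragraph.",
  max_prompt_tokens: 2_000,   # the run ends `incomplete` if this is exceeded
  parallel_tool_calls: false, # serialize tool calls for this run only
  metadata: {"ticket" => "OPS-42"}
)
puts run.status
```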
parallel_tool_calls: nil, + # Body param: **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. truncation_strategy: nil, request_options: {} ) end + # Create a run. sig do params( thread_id: String, @@ -140,28 +232,119 @@ ) end def create_streaming( + # Path param: The ID of the thread to run. thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. assistant_id:, + # Query param: A list of additional fields to include in the response.
Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Body param: **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. truncation_strategy: nil, request_options: {} ) end + # Retrieves a run. sig do params( run_id: String, @@ -170,9 +353,17 @@ ) .returns(OpenAI::Models::Beta::Threads::Run) end - def retrieve(run_id, thread_id:, request_options: {}) + def retrieve( + # The ID of the run to retrieve. + run_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. + thread_id:, + request_options: {} + ) end + # Modifies a run. sig do params( run_id: String, @@ -182,9 +373,24 @@ ) .returns(OpenAI::Models::Beta::Threads::Run) end - def update(run_id, thread_id:, metadata: nil, request_options: {}) + def update( + # Path param: The ID of the run to modify. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) end + # Returns a list of runs belonging to a thread.
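`retrieve` is the building block for polling when you are not streaming. A hedged sketch follows; the status names come from the public Runs documentation rather than this diff, and `to_s` is used defensively since the generated enum may surface as a Symbol:

```ruby
TERMINAL_STATUSES = %w[completed failed cancelled expired incomplete].freeze

run = client.beta.threads.runs.create("thread_abc123", assistant_id: "asst_abc123")
until TERMINAL_STATUSES.include?(run.status.to_s) || run.status.to_s == "requires_action"
  sleep 1 # naive fixed backoff; real code should cap the total wait
  run = client.beta.threads.runs.retrieve(run.id, thread_id: "thread_abc123")
end
puts run.status
```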
sig do params( thread_id: String, @@ -196,9 +402,30 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Run]) end - def list(thread_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the thread the run belongs to. + thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end + # Cancels a run that is `in_progress`. sig do params( run_id: String, @@ -207,9 +434,19 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def cancel(run_id, thread_id:, request_options: {}) + def cancel( + # The ID of the run to cancel. + run_id, + # The ID of the thread to which this run belongs. + thread_id:, + request_options: {} + ) end + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. sig do params( run_id: String, @@ -219,9 +456,23 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Run) end - def submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) + def submit_tool_outputs( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. + tool_outputs:, + request_options: {} + ) end + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. sig do params( run_id: String, @@ -260,7 +511,17 @@ module OpenAI ] ) end - def submit_tool_outputs_streaming(run_id, thread_id:, tool_outputs:, request_options: {}) + def submit_tool_outputs_streaming( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. 
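The contract in the doc string is strict: every pending tool call must be answered in one request. A sketch of the round trip, assuming `run` came from a polling loop like the one above, `required_action` mirrors the REST shape, and `dispatch_tool` is a hypothetical local function:

```ruby
if run.status.to_s == "requires_action"
  tool_calls = run.required_action.submit_tool_outputs.tool_calls

  outputs = tool_calls.map do |call|
    # dispatch_tool is your own code that executes the requested function.
    {tool_call_id: call.id, output: dispatch_tool(call)}
  end

  # All outputs go up in a single request, per the doc string above.
  run = client.beta.threads.runs.submit_tool_outputs(
    run.id,
    thread_id: "thread_abc123",
    tool_outputs: outputs
  )
end
```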
+ tool_outputs:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi index 1714ab4d..4833b5fb 100644 --- a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi @@ -6,6 +6,7 @@ module OpenAI class Threads class Runs class Steps + # Retrieves a run step. sig do params( step_id: String, @@ -16,9 +17,27 @@ module OpenAI ) .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) end - def retrieve(step_id, thread_id:, run_id:, include: nil, request_options: {}) + def retrieve( + # Path param: The ID of the run step to retrieve. + step_id, + # Path param: The ID of the thread to which the run and run step belongs. + thread_id:, + # Path param: The ID of the run to which the run step belongs. + run_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + request_options: {} + ) end + # Returns a list of run steps belonging to a run. sig do params( run_id: String, @@ -33,12 +52,34 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Runs::RunStep]) end def list( + # Path param: The ID of the run the run steps belong to. run_id, + # Path param: The ID of the thread the run and run steps belong to. thread_id:, + # Query param: A cursor for use in pagination. `after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. before: nil, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. include: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. 
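File search result content is omitted from step details unless explicitly requested, so the `include:` value documented above is the whole trick. A brief, hedged sketch under the same client assumptions:

```ruby
steps = client.beta.threads.runs.steps.list(
  "run_abc123",
  thread_id: "thread_abc123",
  order: :asc,
  # The only supported include value today, per the doc string:
  include: ["step_details.tool_calls[*].file_search.results[*].content"]
)
steps.each { |step| puts step.id }
```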
order: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi index 2ea5223f..7f6a851c 100644 --- a/rbi/lib/openai/resources/chat/completions.rbi +++ b/rbi/lib/openai/resources/chat/completions.rbi @@ -8,6 +8,23 @@ module OpenAI def messages end + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). sig do params( messages: T::Array[ @@ -58,40 +75,210 @@ module OpenAI .returns(OpenAI::Models::Chat::ChatCompletion) end def create( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). messages:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` modalities: nil, + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. presence_penalty: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. 
Using `json_schema` is + # preferred for models that support it. response_format: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. seed: nil, + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. service_tier: nil, + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. stop: nil, + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. store: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. tool_choice: nil, + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse.
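With the deprecated `functions`/`function_call` pair and the current `tools`/`tool_choice` pair both present in this signature, a sketch of the non-deprecated path is worth having. The weather tool schema and the response traversal are illustrative, not taken from this diff:

```ruby
completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "What's the weather in Paris?"}],
  tools: [{
    type: :function,
    function: {
      name: "get_weather",
      description: "Look up current weather for a city",
      parameters: {
        type: "object",
        properties: {city: {type: "string"}},
        required: ["city"]
      }
    }
  }],
  tool_choice: :auto # the default when tools are present, spelled out here
)

message = completion.choices.first.message
puts message.tool_calls ? message.tool_calls.first.function.name : message.content
```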
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). web_search_options: nil, request_options: {} ) end + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). sig do params( messages: T::Array[ @@ -142,40 +329,195 @@ module OpenAI .returns(OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk]) end def create_streaming( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). messages:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` modalities: nil, + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. presence_penalty: nil, + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. 
Using `json_schema` is + # preferred for models that support it. response_format: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. seed: nil, + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. service_tier: nil, + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. stop: nil, + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. store: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. tool_choice: nil, + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse.
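Per the sig, `create_streaming` returns `OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk]`. A consumption sketch; the chunk and delta field names follow the public chunk schema and are assumptions here:

```ruby
stream = client.chat.completions.create_streaming(
  model: "gpt-4o",
  messages: [{role: :user, content: "Write a haiku about type checking."}],
  stream_options: {include_usage: true} # only meaningful on streaming requests
)

stream.each do |chunk|
  delta = chunk.choices.first&.delta
  print delta.content if delta&.content # tokens arrive incrementally
end
```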
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). web_search_options: nil, request_options: {} ) end + # Get a stored chat completion. Only Chat Completions that have been created with + # the `store` parameter set to `true` will be returned. sig do params( completion_id: String, @@ -183,9 +525,16 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletion) end - def retrieve(completion_id, request_options: {}) + def retrieve( + # The ID of the chat completion to retrieve. + completion_id, + request_options: {} + ) end + # Modify a stored chat completion. Only Chat Completions that have been created + # with the `store` parameter set to `true` can be modified. Currently, the only + # supported modification is to update the `metadata` field. sig do params( completion_id: String, @@ -194,9 +543,22 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletion) end - def update(completion_id, metadata:, request_options: {}) + def update( + # The ID of the chat completion to update. + completion_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + request_options: {} + ) end + # List stored Chat Completions. Only Chat Completions that have been stored with + # the `store` parameter set to `true` will be returned. sig do params( after: String, @@ -208,9 +570,26 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletion]) end - def list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) + def list( + # Identifier for the last chat completion from the previous pagination request. + after: nil, + # Number of Chat Completions to retrieve. + limit: nil, + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` + metadata: nil, + # The model used to generate the Chat Completions. + model: nil, + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) end + # Delete a stored chat completion. Only Chat Completions that have been created + # with the `store` parameter set to `true` can be deleted. sig do params( completion_id: String, @@ -218,7 +597,11 @@ module OpenAI ) .returns(OpenAI::Models::Chat::ChatCompletionDeleted) end - def delete(completion_id, request_options: {}) + def delete( + # The ID of the chat completion to delete. + completion_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/chat/completions/messages.rbi b/rbi/lib/openai/resources/chat/completions/messages.rbi index 4f39f196..3e7c16e2 100644 --- a/rbi/lib/openai/resources/chat/completions/messages.rbi +++ b/rbi/lib/openai/resources/chat/completions/messages.rbi @@ -5,6 +5,8 @@ module OpenAI class Chat class Completions class Messages + # Get the messages in a stored chat completion. 
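Everything in this block is gated on `store: true` at creation time, which is easy to miss. A hedged lifecycle sketch; the hash form of the `metadata:` filter on `list` is inferred from the `metadata[key]=value` doc string above:

```ruby
created = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "ping"}],
  store: true, # without this, retrieve/update/delete below cannot find it
  metadata: {"env" => "staging"}
)

client.chat.completions.update(created.id, metadata: {"env" => "prod"})
client.chat.completions.list(metadata: {"env" => "prod"}, limit: 10)
client.chat.completions.delete(created.id)
```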
Only Chat Completions that have + # been created with the `store` parameter set to `true` will be returned. sig do params( completion_id: String, @@ -15,7 +17,18 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletionStoreMessage]) end - def list(completion_id, after: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the chat completion to retrieve messages from. + completion_id, + # Identifier for the last message from the previous pagination request. + after: nil, + # Number of messages to retrieve. + limit: nil, + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi index 5f018bc1..949f94ed 100644 --- a/rbi/lib/openai/resources/completions.rbi +++ b/rbi/lib/openai/resources/completions.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Completions + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, Symbol), @@ -34,27 +35,114 @@ .returns(OpenAI::Models::Completion) end def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. model:, + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. prompt:, + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. best_of: nil, + # Echo back the prompt in addition to the completion. echo: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) frequency_penalty: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token.
+ # + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. logit_bias: nil, + # Include the log probabilities on the `logprobs` most likely output tokens, as + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. logprobs: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. max_tokens: nil, + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. n: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) presence_penalty: nil, + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. seed: nil, + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. stop: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. suffix: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) end + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, Symbol), @@ -86,22 +174,108 @@ .returns(OpenAI::Stream[OpenAI::Models::Completion]) end def create_streaming( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them.
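The `{"50256": -100}` example from the doc string is clearer in context: `logit_bias` keys are stringified token IDs and values are biases. A sketch against this legacy endpoint, under the usual client assumptions:

```ruby
completion = client.completions.create(
  model: "gpt-3.5-turbo-instruct",
  prompt: "Once upon a time",
  max_tokens: 32,
  logit_bias: {"50256" => -100}, # effectively bans the <|endoftext|> token
  logprobs: 3                    # capped at 5, per the doc string above
)
puts completion.choices.first.text
```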
model:, + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. prompt:, + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. best_of: nil, + # Echo back the prompt in addition to the completion. echo: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) frequency_penalty: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. + # + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. logit_bias: nil, + # Include the log probabilities on the `logprobs` most likely output tokens, as + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. logprobs: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. max_tokens: nil, + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. n: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics.
+ # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) presence_penalty: nil, + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. seed: nil, + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. stop: nil, + # Options for streaming response. Only set this when you set `stream: true`. stream_options: nil, + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. suffix: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/embeddings.rbi b/rbi/lib/openai/resources/embeddings.rbi index 86d61392..025c37a7 100644 --- a/rbi/lib/openai/resources/embeddings.rbi +++ b/rbi/lib/openai/resources/embeddings.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Embeddings + # Creates an embedding vector representing the input text. sig do params( input: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]), @@ -14,7 +15,34 @@ module OpenAI ) .returns(OpenAI::Models::CreateEmbeddingResponse) end - def create(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) + def create( + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. + input:, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The number of dimensions the resulting output embeddings should have. Only + # supported in `text-embedding-3` and later models. + dimensions: nil, + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). 
+ encoding_format: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/files.rbi b/rbi/lib/openai/resources/files.rbi index 508e9d63..441f93e0 100644 --- a/rbi/lib/openai/resources/files.rbi +++ b/rbi/lib/openai/resources/files.rbi @@ -3,6 +3,27 @@ module OpenAI module Resources class Files + # Upload a file that can be used across various endpoints. Individual files can be + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 100 GB. + # + # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. + # + # The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. sig do params( file: T.any(IO, StringIO), @@ -11,9 +32,19 @@ module OpenAI ) .returns(OpenAI::Models::FileObject) end - def create(file:, purpose:, request_options: {}) + def create( + # The File object (not file name) to be uploaded. + file:, + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + purpose:, + request_options: {} + ) end + # Returns information about a specific file. sig do params( file_id: String, @@ -21,9 +52,14 @@ module OpenAI ) .returns(OpenAI::Models::FileObject) end - def retrieve(file_id, request_options: {}) + def retrieve( + # The ID of the file to use for this request. + file_id, + request_options: {} + ) end + # Returns a list of files. sig do params( after: String, @@ -34,9 +70,25 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FileObject]) end - def list(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + # Only return files with the given purpose. + purpose: nil, + request_options: {} + ) end + # Delete a file. 
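# (Editor's aside, not part of the diff: a minimal Ruby usage sketch of the Files
# signatures documented above, assuming a configured openai-ruby client; the file
# path, purpose values, and the symbol-vs-string purpose forms are illustrative
# assumptions, not confirmed by this patch.)
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
# Upload a .jsonl file for fine-tuning, then page through files with that purpose.
file = client.files.create(file: File.open("training.jsonl"), purpose: :"fine-tune")
client.files.list(purpose: "fine-tune", limit: 100).each { |f| puts "#{f.id} #{f.filename}" }
# Delete the file once it is no longer needed.
client.files.delete(file.id)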
sig do params( file_id: String, @@ -44,9 +96,14 @@ module OpenAI ) .returns(OpenAI::Models::FileDeleted) end - def delete(file_id, request_options: {}) + def delete( + # The ID of the file to use for this request. + file_id, + request_options: {} + ) end + # Returns the contents of the specified file. sig do params( file_id: String, @@ -54,7 +111,11 @@ module OpenAI ) .returns(T.anything) end - def content(file_id, request_options: {}) + def content( + # The ID of the file to use for this request. + file_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/fine_tuning/jobs.rbi b/rbi/lib/openai/resources/fine_tuning/jobs.rbi index 8ace3da3..e1709e2c 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs.rbi @@ -8,6 +8,13 @@ module OpenAI def checkpoints end + # Creates a fine-tuning job which begins the process of creating a new model from + # a given dataset. + # + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( model: T.any(String, Symbol), @@ -24,19 +31,71 @@ module OpenAI .returns(OpenAI::Models::FineTuning::FineTuningJob) end def create( + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). model:, + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. training_file:, + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. hyperparameters: nil, + # A list of integrations to enable for your fine-tuning job. integrations: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The method used for fine-tuning. method_: nil, + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. seed: nil, + # A string of up to 64 characters that will be added to your fine-tuned model + # name. 
+ # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. suffix: nil, + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. validation_file: nil, request_options: {} ) end + # Get info about a fine-tuning job. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( fine_tuning_job_id: String, @@ -44,9 +103,14 @@ module OpenAI ) .returns(OpenAI::Models::FineTuning::FineTuningJob) end - def retrieve(fine_tuning_job_id, request_options: {}) + def retrieve( + # The ID of the fine-tuning job. + fine_tuning_job_id, + request_options: {} + ) end + # List your organization's fine-tuning jobs sig do params( after: String, @@ -56,9 +120,19 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJob]) end - def list(after: nil, limit: nil, metadata: nil, request_options: {}) + def list( + # Identifier for the last job from the previous pagination request. + after: nil, + # Number of fine-tuning jobs to retrieve. + limit: nil, + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. + metadata: nil, + request_options: {} + ) end + # Immediately cancel a fine-tune job. sig do params( fine_tuning_job_id: String, @@ -66,9 +140,14 @@ module OpenAI ) .returns(OpenAI::Models::FineTuning::FineTuningJob) end - def cancel(fine_tuning_job_id, request_options: {}) + def cancel( + # The ID of the fine-tuning job to cancel. + fine_tuning_job_id, + request_options: {} + ) end + # Get status updates for a fine-tuning job. sig do params( fine_tuning_job_id: String, @@ -78,7 +157,15 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJobEvent]) end - def list_events(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) + def list_events( + # The ID of the fine-tuning job to get events for. + fine_tuning_job_id, + # Identifier for the last event from the previous pagination request. + after: nil, + # Number of events to retrieve. + limit: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi index 297b57f1..24509186 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi @@ -5,6 +5,7 @@ module OpenAI class FineTuning class Jobs class Checkpoints + # List checkpoints for a fine-tuning job. sig do params( fine_tuning_job_id: String, @@ -14,7 +15,15 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint]) end - def list(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) + def list( + # The ID of the fine-tuning job to get checkpoints for. + fine_tuning_job_id, + # Identifier for the last checkpoint ID from the previous pagination request. 
+ after: nil, + # Number of checkpoints to retrieve. + limit: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/images.rbi b/rbi/lib/openai/resources/images.rbi index 357eccc3..396f2edd 100644 --- a/rbi/lib/openai/resources/images.rbi +++ b/rbi/lib/openai/resources/images.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Images + # Creates a variation of a given image. sig do params( image: T.any(IO, StringIO), @@ -16,16 +17,31 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def create_variation( + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. image:, + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. model: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) end + # Creates an edited or extended image given an original image and a prompt. sig do params( image: T.any(IO, StringIO), @@ -41,18 +57,37 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def edit( + # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + # is not provided, image must have transparency, which will be used as the mask. image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters. prompt:, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. mask: nil, + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. model: nil, + # The number of images to generate. Must be between 1 and 10. n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) end + # Creates an image given a prompt. sig do params( prompt: String, @@ -68,13 +103,34 @@ module OpenAI .returns(OpenAI::Models::ImagesResponse) end def generate( + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. prompt:, + # The model to use for image generation. model: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. n: nil, + # The quality of the image that will be generated. `hd` creates images with finer + # details and greater consistency across the image. 
This param is only supported + # for `dall-e-3`. quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. size: nil, + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/models.rbi b/rbi/lib/openai/resources/models.rbi index 2e4b916b..52e65f29 100644 --- a/rbi/lib/openai/resources/models.rbi +++ b/rbi/lib/openai/resources/models.rbi @@ -3,6 +3,8 @@ module OpenAI module Resources class Models + # Retrieves a model instance, providing basic information about the model such as + # the owner and permissioning. sig do params( model: String, @@ -10,9 +12,15 @@ module OpenAI ) .returns(OpenAI::Models::Model) end - def retrieve(model, request_options: {}) + def retrieve( + # The ID of the model to use for this request + model, + request_options: {} + ) end + # Lists the currently available models, and provides basic information about each + # one such as the owner and availability. sig do params(request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))) .returns(OpenAI::Page[OpenAI::Models::Model]) @@ -20,6 +28,8 @@ module OpenAI def list(request_options: {}) end + # Delete a fine-tuned model. You must have the Owner role in your organization to + # delete a model. sig do params( model: String, @@ -27,7 +37,11 @@ module OpenAI ) .returns(OpenAI::Models::ModelDeleted) end - def delete(model, request_options: {}) + def delete( + # The model to delete + model, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/moderations.rbi b/rbi/lib/openai/resources/moderations.rbi index 8b836716..3b9b2bd0 100644 --- a/rbi/lib/openai/resources/moderations.rbi +++ b/rbi/lib/openai/resources/moderations.rbi @@ -3,6 +3,8 @@ module OpenAI module Resources class Moderations + # Classifies if text and/or image inputs are potentially harmful. Learn more in + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). sig do params( input: T.any( @@ -15,7 +17,17 @@ module OpenAI ) .returns(OpenAI::Models::ModerationCreateResponse) end - def create(input:, model: nil, request_options: {}) + def create( + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + input:, + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). 
+ model: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi index d225d52c..823cc8a1 100644 --- a/rbi/lib/openai/resources/responses.rbi +++ b/rbi/lib/openai/resources/responses.rbi @@ -7,6 +7,17 @@ module OpenAI def input_items end + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any(String, OpenAI::Models::Responses::ResponseInput), @@ -38,27 +49,125 @@ module OpenAI .returns(OpenAI::Models::Responses::Response) end def create( + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) input:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image URLs from the input message. + # - `computer_call_output.output.image_url`: Include image URLs from the computer + # call output. include: nil, + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. instructions: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_output_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Whether to allow the model to run tool calls in parallel. parallel_tool_calls: nil, + # The unique ID of the previous response to the model.
Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). previous_response_id: nil, + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). reasoning: nil, + # Whether to store the generated model response for later retrieval via API. store: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) end + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs.
Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any(String, OpenAI::Models::Responses::ResponseInput), @@ -127,27 +236,115 @@ module OpenAI ) end def create_streaming( + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) input:, + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. model:, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image URLs from the input message. + # - `computer_call_output.output.image_url`: Include image URLs from the computer + # call output. include: nil, + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. instructions: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_output_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Whether to allow the model to run tool calls in parallel. parallel_tool_calls: nil, + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). previous_response_id: nil, + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). reasoning: nil, + # Whether to store the generated model response for later retrieval via API. store: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both.
temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. top_p: nil, + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, request_options: {} ) end + # Retrieves a model response with the given ID. sig do params( response_id: String, @@ -156,9 +353,17 @@ module OpenAI ) .returns(OpenAI::Models::Responses::Response) end - def retrieve(response_id, include: nil, request_options: {}) + def retrieve( + # The ID of the response to retrieve. + response_id, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + request_options: {} + ) end + # Deletes a model response with the given ID. sig do params( response_id: String, @@ -166,7 +371,11 @@ module OpenAI ) .void end - def delete(response_id, request_options: {}) + def delete( + # The ID of the response to delete. 
+ response_id, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/responses/input_items.rbi b/rbi/lib/openai/resources/responses/input_items.rbi index 5c8d359e..f6d2e651 100644 --- a/rbi/lib/openai/resources/responses/input_items.rbi +++ b/rbi/lib/openai/resources/responses/input_items.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Responses class InputItems + # Returns a list of input items for a given response. sig do params( response_id: String, @@ -28,7 +29,23 @@ module OpenAI ] ) end - def list(response_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # The ID of the response to retrieve input items for. + response_id, + # An item ID to list items after, used in pagination. + after: nil, + # An item ID to list items before, used in pagination. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + order: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/uploads.rbi b/rbi/lib/openai/resources/uploads.rbi index 094b93ea..98a58dc5 100644 --- a/rbi/lib/openai/resources/uploads.rbi +++ b/rbi/lib/openai/resources/uploads.rbi @@ -7,6 +7,25 @@ module OpenAI def parts end + # Creates an intermediate + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires an hour + # after you create it. + # + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. + # + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). sig do params( bytes: Integer, @@ -17,9 +36,26 @@ module OpenAI ) .returns(OpenAI::Models::Upload) end - def create(bytes:, filename:, mime_type:, purpose:, request_options: {}) + def create( + # The number of bytes in the file you are uploading. + bytes:, + # The name of the file to upload. + filename:, + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. + mime_type:, + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + purpose:, + request_options: {} + ) end + # Cancels the Upload. No Parts may be added after an Upload is cancelled.
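# (Editor's aside, not part of the diff: the multipart Upload flow that the
# signatures above and the `complete` and Parts signatures below describe,
# sketched end to end; the `client` setup, file name, MIME type, and purpose
# symbol are illustrative assumptions, not confirmed by this patch.)
require "stringio"
data = File.binread("training.jsonl")
upload = client.uploads.create(
  bytes: data.bytesize,
  filename: "training.jsonl",
  mime_type: "text/jsonl",
  purpose: :"fine-tune"
)
# Parts (each at most 64 MB, per the Parts docs below) may be added in parallel.
part = client.uploads.parts.create(upload.id, data: StringIO.new(data))
completed = client.uploads.complete(upload.id, part_ids: [part.id])
completed.file # the nested File object, usable like any other File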
sig do params( upload_id: String, @@ -27,9 +63,26 @@ module OpenAI ) .returns(OpenAI::Models::Upload) end - def cancel(upload_id, request_options: {}) + def cancel( + # The ID of the Upload. + upload_id, + request_options: {} + ) end + # Completes the + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. + # + # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. + # + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. sig do params( upload_id: String, @@ -39,7 +92,16 @@ module OpenAI ) .returns(OpenAI::Models::Upload) end - def complete(upload_id, part_ids:, md5: nil, request_options: {}) + def complete( + # The ID of the Upload. + upload_id, + # The ordered list of Part IDs. + part_ids:, + # The optional md5 checksum for the file contents to verify if the bytes uploaded + # match what you expect. + md5: nil, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/uploads/parts.rbi b/rbi/lib/openai/resources/uploads/parts.rbi index e32900d9..6e52432f 100644 --- a/rbi/lib/openai/resources/uploads/parts.rbi +++ b/rbi/lib/openai/resources/uploads/parts.rbi @@ -4,6 +4,17 @@ module OpenAI module Resources class Uploads class Parts + # Adds a + # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. + # + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. + # + # It is possible to add multiple Parts in parallel. You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). sig do params( upload_id: String, @@ -12,7 +23,13 @@ module OpenAI ) .returns(OpenAI::Models::Uploads::UploadPart) end - def create(upload_id, data:, request_options: {}) + def create( + # The ID of the Upload. + upload_id, + # The chunk of bytes for this Part. + data:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/resources/vector_stores.rbi b/rbi/lib/openai/resources/vector_stores.rbi index fdbb2632..32aecdbf 100644 --- a/rbi/lib/openai/resources/vector_stores.rbi +++ b/rbi/lib/openai/resources/vector_stores.rbi @@ -11,6 +11,7 @@ module OpenAI def file_batches end + # Create a vector store. sig do params( chunking_strategy: T.any( @@ -26,15 +27,29 @@ module OpenAI .returns(OpenAI::Models::VectorStore) end def create( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. chunking_strategy: nil, + # The expiration policy for a vector store. expires_after: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object.
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # The name of the vector store. name: nil, request_options: {} ) end + # Retrieves a vector store. sig do params( vector_store_id: String, @@ -42,9 +57,14 @@ module OpenAI ) .returns(OpenAI::Models::VectorStore) end - def retrieve(vector_store_id, request_options: {}) + def retrieve( + # The ID of the vector store to retrieve. + vector_store_id, + request_options: {} + ) end + # Modifies a vector store. sig do params( vector_store_id: String, @@ -55,9 +75,25 @@ module OpenAI ) .returns(OpenAI::Models::VectorStore) end - def update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {}) + def update( + # The ID of the vector store to modify. + vector_store_id, + # The expiration policy for a vector store. + expires_after: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) end + # Returns a list of vector stores. sig do params( after: String, @@ -68,9 +104,28 @@ module OpenAI ) .returns(OpenAI::CursorPage[OpenAI::Models::VectorStore]) end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) end + # Delete a vector store. sig do params( vector_store_id: String, @@ -78,9 +133,15 @@ module OpenAI ) .returns(OpenAI::Models::VectorStoreDeleted) end - def delete(vector_store_id, request_options: {}) + def delete( + # The ID of the vector store to delete. + vector_store_id, + request_options: {} + ) end + # Search a vector store for relevant chunks based on a query and file attributes + # filter. sig do params( vector_store_id: String, @@ -94,11 +155,18 @@ module OpenAI .returns(OpenAI::Page[OpenAI::Models::VectorStoreSearchResponse]) end def search( + # The ID of the vector store to search. vector_store_id, + # A query string for a search query:, + # A filter to apply based on file attributes. filters: nil, + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. max_num_results: nil, + # Ranking options for search. 
ranking_options: nil, + # Whether to rewrite the natural language query for vector search. rewrite_query: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/vector_stores/file_batches.rbi b/rbi/lib/openai/resources/vector_stores/file_batches.rbi index bcec19cb..ec4e8e7a 100644 --- a/rbi/lib/openai/resources/vector_stores/file_batches.rbi +++ b/rbi/lib/openai/resources/vector_stores/file_batches.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class VectorStores class FileBatches + # Create a vector store file batch. sig do params( vector_store_id: String, @@ -17,9 +18,27 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) + def create( + # The ID of the vector store for which to create a File Batch. + vector_store_id, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) end + # Retrieves a vector store file batch. sig do params( batch_id: String, @@ -28,9 +47,17 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def retrieve(batch_id, vector_store_id:, request_options: {}) + def retrieve( + # The ID of the file batch being retrieved. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) end + # Cancel a vector store file batch. This attempts to cancel the processing of + # files in this batch as soon as possible. sig do params( batch_id: String, @@ -39,9 +66,16 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) end - def cancel(batch_id, vector_store_id:, request_options: {}) + def cancel( + # The ID of the file batch to cancel. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) end + # Returns a list of vector store files in a batch. sig do params( batch_id: String, @@ -56,12 +90,28 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) end def list_files( + # Path param: The ID of the file batch that the files belong to. batch_id, + # Path param: The ID of the vector store that the files belong to. vector_store_id:, + # Query param: A cursor for use in pagination. `after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. 
For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. before: nil, + # Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, + # `cancelled`. filter: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. order: nil, request_options: {} ) diff --git a/rbi/lib/openai/resources/vector_stores/files.rbi b/rbi/lib/openai/resources/vector_stores/files.rbi index 85c53733..2fc3ae57 100644 --- a/rbi/lib/openai/resources/vector_stores/files.rbi +++ b/rbi/lib/openai/resources/vector_stores/files.rbi @@ -4,6 +4,9 @@ module OpenAI module Resources class VectorStores class Files + # Create a vector store file by attaching a + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). sig do params( vector_store_id: String, @@ -17,9 +20,27 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) + def create( + # The ID of the vector store for which to create a File. + vector_store_id, + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) end + # Retrieves a vector store file. sig do params( file_id: String, @@ -28,9 +49,16 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def retrieve(file_id, vector_store_id:, request_options: {}) + def retrieve( + # The ID of the file being retrieved. + file_id, + # The ID of the vector store that the file belongs to. + vector_store_id:, + request_options: {} + ) end + # Update attributes on a vector store file. sig do params( file_id: String, @@ -40,9 +68,22 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFile) end - def update(file_id, vector_store_id:, attributes:, request_options: {}) + def update( + # Path param: The ID of the file whose attributes to update. + file_id, + # Path param: The ID of the vector store the file belongs to. + vector_store_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. Keys are + # strings with a maximum length of 64 characters. Values are strings with a + # maximum length of 512 characters, booleans, or numbers. + attributes:, + request_options: {} + ) end + # Returns a list of vector store files.
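# (Editor's aside, not part of the diff: attaching a File to a vector store and
# listing the result, per the signatures above; the IDs and the symbol form of
# `filter` are illustrative assumptions.)
vs_file = client.vector_stores.files.create("vs_abc123", file_id: "file-abc123")
puts vs_file.status # e.g. "in_progress" while indexing
client.vector_stores.files.list("vs_abc123", filter: :completed, limit: 20).each do |f|
  puts f.id
end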
sig do params( vector_store_id: String, @@ -56,16 +97,34 @@ module OpenAI .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) end def list( + # The ID of the vector store that the files belong to. vector_store_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. before: nil, + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. filter: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. order: nil, request_options: {} ) end + # Delete a vector store file. This will remove the file from the vector store but + # the file itself will not be deleted. To delete the file, use the + # [delete file](https://platform.openai.com/docs/api-reference/files/delete) + # endpoint. sig do params( file_id: String, @@ -74,9 +133,16 @@ module OpenAI ) .returns(OpenAI::Models::VectorStores::VectorStoreFileDeleted) end - def delete(file_id, vector_store_id:, request_options: {}) + def delete( + # The ID of the file to delete. + file_id, + # The ID of the vector store that the file belongs to. + vector_store_id:, + request_options: {} + ) end + # Retrieve the parsed contents of a vector store file. sig do params( file_id: String, @@ -85,7 +151,13 @@ module OpenAI ) .returns(OpenAI::Page[OpenAI::Models::VectorStores::FileContentResponse]) end - def content(file_id, vector_store_id:, request_options: {}) + def content( + # The ID of the file within the vector store. + file_id, + # The ID of the vector store. 
+ vector_store_id:, + request_options: {} + ) end sig { params(client: OpenAI::Client).returns(T.attached_class) } diff --git a/rbi/lib/openai/stream.rbi b/rbi/lib/openai/stream.rbi index 3dc46e28..75e469ce 100644 --- a/rbi/lib/openai/stream.rbi +++ b/rbi/lib/openai/stream.rbi @@ -4,24 +4,12 @@ module OpenAI class Stream include OpenAI::BaseStream - Message = type_member(:in) { {fixed: OpenAI::Util::SSEMessage} } + Message = type_member(:in) { {fixed: OpenAI::Util::ServerSentEvent} } Elem = type_member(:out) + # @api private sig { override.returns(T::Enumerable[Elem]) } private def iterator end - - sig do - params( - model: T.any(T::Class[T.anything], OpenAI::Converter), - url: URI::Generic, - status: Integer, - response: Net::HTTPResponse, - messages: T::Enumerable[OpenAI::Util::SSEMessage] - ) - .returns(T.attached_class) - end - def self.new(model:, url:, status:, response:, messages:) - end end end diff --git a/rbi/lib/openai/util.rbi b/rbi/lib/openai/util.rbi index 246a7672..fe12b3ff 100644 --- a/rbi/lib/openai/util.rbi +++ b/rbi/lib/openai/util.rbi @@ -1,42 +1,52 @@ # typed: strong module OpenAI + # @api private module Util + # @api private sig { returns(Float) } def self.monotonic_secs end class << self + # @api private sig { returns(String) } def arch end + # @api private sig { returns(String) } def os end end class << self - sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } + # @api private + sig { params(input: T.anything).returns(T::Boolean) } def primitive?(input) end + # @api private sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } def coerce_boolean(input) end + # @api private sig { params(input: T.anything).returns(T.nilable(T::Boolean)) } def coerce_boolean!(input) end + # @api private sig { params(input: T.anything).returns(T.any(Integer, T.anything)) } def coerce_integer(input) end + # @api private sig { params(input: T.anything).returns(T.any(Float, T.anything)) } def coerce_float(input) end + # @api private sig { params(input: T.anything).returns(T.any(T::Hash[T.anything, T.anything], T.anything)) } def coerce_hash(input) end @@ -45,17 +55,29 @@ module OpenAI OMIT = T.let(T.anything, T.anything) class << self + # @api private sig { params(lhs: T.anything, rhs: T.anything, concat: T::Boolean).returns(T.anything) } private def deep_merge_lr(lhs, rhs, concat: false) end + # @api private + # + # Recursively merge one hash with another. If the values at a given key are not + # both hashes, just take the new value. sig do params(values: T::Array[T.anything], sentinel: T.nilable(T.anything), concat: T::Boolean) .returns(T.anything) end - def deep_merge(*values, sentinel: nil, concat: false) + def deep_merge( + *values, + # the value to return if no values are provided. + sentinel: nil, + # whether to merge sequences by concatenation. 
+          concat: false
+        )
       end

+      # @api private
       sig do
         params(
           data: T.any(T::Hash[Symbol, T.anything], T::Array[T.anything], T.anything),
@@ -70,20 +92,24 @@ module OpenAI
       end
     end

     class << self
+      # @api private
       sig { params(uri: URI::Generic).returns(String) }
       def uri_origin(uri)
       end

+      # @api private
       sig { params(path: T.any(String, T::Array[String])).returns(String) }
       def interpolate_path(path)
       end
     end

     class << self
+      # @api private
       sig { params(query: T.nilable(String)).returns(T::Hash[String, T::Array[String]]) }
       def decode_query(query)
       end

+      # @api private
       sig do
         params(query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))]))
           .returns(T.nilable(String))
@@ -103,14 +129,17 @@ module OpenAI
     end

     class << self
+      # @api private
       sig { params(url: T.any(URI::Generic, String)).returns(OpenAI::Util::ParsedUriShape) }
       def parse_uri(url)
       end

+      # @api private
       sig { params(parsed: OpenAI::Util::ParsedUriShape).returns(URI::Generic) }
       def unparse_uri(parsed)
       end

+      # @api private
       sig do
         params(lhs: OpenAI::Util::ParsedUriShape, rhs: OpenAI::Util::ParsedUriShape).returns(URI::Generic)
       end
@@ -119,6 +148,7 @@ module OpenAI
     end

     class << self
+      # @api private
       sig do
         params(
           headers: T::Hash[String,
@@ -130,15 +160,21 @@ module OpenAI
       end
     end

+    # @api private
+    #
+    # An adapter that satisfies the IO interface required by `::IO.copy_stream`
     class ReadIOAdapter
+      # @api private
       sig { params(max_len: T.nilable(Integer)).returns(String) }
       private def read_enum(max_len)
       end

+      # @api private
       sig { params(max_len: T.nilable(Integer), out_string: T.nilable(String)).returns(T.nilable(String)) }
       def read(max_len = nil, out_string = nil)
       end

+      # @api private
       sig do
         params(
           stream: T.any(String, IO, StringIO, T::Enumerable[String]),
@@ -157,20 +193,24 @@ module OpenAI
     end

     class << self
+      # @api private
       sig do
         params(y: Enumerator::Yielder, boundary: String, key: T.any(Symbol, String), val: T.anything).void
       end
       private def encode_multipart_formdata(y, boundary:, key:, val:)
       end

+      # @api private
       sig { params(body: T.anything).returns([String, T::Enumerable[String]]) }
       private def encode_multipart_streaming(body)
       end

+      # @api private
       sig { params(headers: T::Hash[String, String], body: T.anything).returns(T.anything) }
       def encode_content(headers, body)
       end

+      # @api private
       sig do
         params(
           headers: T.any(T::Hash[String, String], Net::HTTPHeader),
@@ -184,6 +224,9 @@ module OpenAI
     end

     class << self
+      # @api private
+      #
+      # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html
       sig do
         params(enum: T::Enumerable[T.anything], external: T::Boolean, close: T.proc.void)
           .returns(T::Enumerable[T.anything])
@@ -191,30 +234,34 @@ module OpenAI
       def fused_enum(enum, external: false, &close)
       end

+      # @api private
       sig { params(enum: T.nilable(T::Enumerable[T.anything])).void }
       def close_fused!(enum)
       end

+      # @api private
       sig do
-        params(
-          enum: T.nilable(T::Enumerable[T.anything]),
-          blk: T.proc.params(arg0: Enumerator::Yielder).void
-        ).void
+        params(enum: T.nilable(T::Enumerable[T.anything]), blk: T.proc.params(arg0: Enumerator::Yielder).void)
+          .returns(T::Enumerable[T.anything])
       end
       def chain_fused(enum, &blk)
       end
     end

-    SSEMessage = T.type_alias do
+    ServerSentEvent = T.type_alias do
       {event: T.nilable(String), data: T.nilable(String), id: T.nilable(String), retry: T.nilable(Integer)}
     end

     class << self
+      # @api private
       sig { params(enum: T::Enumerable[String]).returns(T::Enumerable[String]) }
       def decode_lines(enum)
       end

-      sig { params(lines: T::Enumerable[String]).returns(OpenAI::Util::SSEMessage) }
+      # @api private
+      #
+      # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream
+      sig { params(lines: T::Enumerable[String]).returns(OpenAI::Util::ServerSentEvent) }
       def decode_sse(lines)
       end
     end
diff --git a/rbi/lib/openai/version.rbi b/rbi/lib/openai/version.rbi
index 5366ece5..1f42bc59 100644
--- a/rbi/lib/openai/version.rbi
+++ b/rbi/lib/openai/version.rbi
@@ -1,5 +1,5 @@
 # typed: strong

 module OpenAI
-  VERSION = "0.1.0-alpha.1"
+  VERSION = "0.1.0-alpha.2"
 end
diff --git a/sig/openai/base_client.rbs b/sig/openai/base_client.rbs
index 4ca67417..0c19b54e 100644
--- a/sig/openai/base_client.rbs
+++ b/sig/openai/base_client.rbs
@@ -43,7 +43,12 @@ module OpenAI
       response_headers: ::Hash[String, String]
     ) -> OpenAI::BaseClient::request_input

-    # @private
+    def self.reap_connection!: (
+      Integer | OpenAI::APIConnectionError status,
+      stream: Enumerable[String]?
+    ) -> void
+
+    # @api private
     attr_accessor requester: top

     def initialize: (
diff --git a/sig/openai/base_model.rbs b/sig/openai/base_model.rbs
index 5b709bfc..574847b4 100644
--- a/sig/openai/base_model.rbs
+++ b/sig/openai/base_model.rbs
@@ -85,9 +85,11 @@ module OpenAI
     class Union
       extend OpenAI::Converter

-      private def self.known_variants: -> ::Array[[Symbol?, Proc]]
+      private def self.known_variants: -> ::Array[[Symbol?, (^-> OpenAI::Converter::input)]]

-      def self.variants: -> ::Array[[Symbol?, top]]
+      def self.derefed_variants: -> ::Array[[Symbol?, top]]
+
+      def self.variants: -> ::Array[top]

       private def self.discriminator: (Symbol property) -> void

@@ -116,7 +118,7 @@ module OpenAI
       ) -> ([true, top, nil] | [false, bool, Integer])
     end

-    class ArrayOf
+    class ArrayOf[Elem]
       include OpenAI::Converter

       def ===: (top other) -> bool
@@ -131,7 +133,7 @@ module OpenAI
         top value
       ) -> ([true, top, nil] | [false, bool, Integer])

-      def item_type: -> OpenAI::Converter::input
+      def item_type: -> Elem

       def initialize: (
         ::Hash[Symbol, top]
@@ -141,7 +143,7 @@
       ) -> void
     end

-    class HashOf
+    class HashOf[Elem]
       include OpenAI::Converter

       def ===: (top other) -> bool
@@ -156,7 +158,7 @@ module OpenAI
         top value
       ) -> ([true, top, nil] | [false, bool, Integer])

-      def item_type: -> OpenAI::Converter::input
+      def item_type: -> Elem

       def initialize: (
         ::Hash[Symbol, top]
diff --git a/sig/openai/cursor_page.rbs b/sig/openai/cursor_page.rbs
index d7f7e58b..5aa7c659 100644
--- a/sig/openai/cursor_page.rbs
+++ b/sig/openai/cursor_page.rbs
@@ -2,15 +2,8 @@ module OpenAI
   class CursorPage[Elem]
     include OpenAI::BasePage[Elem]

-    attr_accessor data: ::Array[Elem]
+    attr_accessor data: ::Array[Elem]?

     attr_accessor has_more: bool
-
-    def initialize: (
-      client: OpenAI::BaseClient,
-      req: OpenAI::BaseClient::request_components,
-      headers: ::Hash[String, String],
-      page_data: ::Hash[Symbol, top]
-    ) -> void
   end
 end
diff --git a/sig/openai/errors.rbs b/sig/openai/errors.rbs
index 08f05bd0..b5c00826 100644
--- a/sig/openai/errors.rbs
+++ b/sig/openai/errors.rbs
@@ -1,23 +1,23 @@
 module OpenAI
   class Error < StandardError
-    attr_reader cause: StandardError?
+    attr_accessor cause: StandardError?
   end

   class ConversionError < OpenAI::Error
   end

   class APIError < OpenAI::Error
-    attr_reader url: URI::Generic
+    attr_accessor url: URI::Generic

-    attr_reader status: Integer?
+    attr_accessor status: Integer?

-    attr_reader body: top?
+    attr_accessor body: top?

-    attr_reader code: String?
+    attr_accessor code: String?

-    attr_reader param: String?
+    attr_accessor param: String?

-    attr_reader type: String?
+    attr_accessor type: String?

     def initialize: (
       url: URI::Generic,
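The errors.rbs hunk above flips every error field from `attr_reader` to `attr_accessor`, matching the changelog entry that error fields are now mutable in keeping with the rest of the SDK. A minimal sketch of what the widened signatures permit at runtime; the failing call and the redaction step are illustrative, not part of this diff:

```ruby
require "openai"

begin
  # Any SDK request that raises OpenAI::APIError works here; this one is
  # just a hypothetical call with a bad key.
  client = OpenAI::Client.new(api_key: "not-a-real-key")
  client.models.list
rescue OpenAI::APIError => e
  e.status      # readable, as before (Integer?)
  e.body = nil  # now writable too, e.g. to redact a payload before logging
  raise
end
```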
diff --git a/sig/openai/models/audio/speech_create_params.rbs b/sig/openai/models/audio/speech_create_params.rbs
index 4a89f863..c1e0ec60 100644
--- a/sig/openai/models/audio/speech_create_params.rbs
+++ b/sig/openai/models/audio/speech_create_params.rbs
@@ -50,7 +50,7 @@ module OpenAI
       type model = String | OpenAI::Models::Audio::speech_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Audio::speech_model]]
+        def self.variants: -> [String, OpenAI::Models::Audio::speech_model]
       end

       type voice =
diff --git a/sig/openai/models/audio/transcription_create_params.rbs b/sig/openai/models/audio/transcription_create_params.rbs
index 3f154e80..22080806 100644
--- a/sig/openai/models/audio/transcription_create_params.rbs
+++ b/sig/openai/models/audio/transcription_create_params.rbs
@@ -66,7 +66,7 @@ module OpenAI
       type model = String | OpenAI::Models::audio_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::audio_model]]
+        def self.variants: -> [String, OpenAI::Models::audio_model]
       end

       type timestamp_granularity = :word | :segment
diff --git a/sig/openai/models/audio/transcription_create_response.rbs b/sig/openai/models/audio/transcription_create_response.rbs
index bae11946..f0179c81 100644
--- a/sig/openai/models/audio/transcription_create_response.rbs
+++ b/sig/openai/models/audio/transcription_create_response.rbs
@@ -6,7 +6,7 @@ module OpenAI
         | OpenAI::Models::Audio::TranscriptionVerbose

       class TranscriptionCreateResponse < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Audio::Transcription], [nil, OpenAI::Models::Audio::TranscriptionVerbose]]
+        def self.variants: -> [OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose]
       end
     end
   end
diff --git a/sig/openai/models/audio/translation_create_params.rbs b/sig/openai/models/audio/translation_create_params.rbs
index 3dd3ee9c..916641d5 100644
--- a/sig/openai/models/audio/translation_create_params.rbs
+++ b/sig/openai/models/audio/translation_create_params.rbs
@@ -52,7 +52,7 @@ module OpenAI
       type model = String | OpenAI::Models::audio_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::audio_model]]
+        def self.variants: -> [String, OpenAI::Models::audio_model]
       end
     end
   end
diff --git a/sig/openai/models/audio/translation_create_response.rbs b/sig/openai/models/audio/translation_create_response.rbs
index a8516dab..d80690b9 100644
--- a/sig/openai/models/audio/translation_create_response.rbs
+++ b/sig/openai/models/audio/translation_create_response.rbs
@@ -6,7 +6,7 @@ module OpenAI
         | OpenAI::Models::Audio::TranslationVerbose

       class TranslationCreateResponse < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Audio::Translation], [nil, OpenAI::Models::Audio::TranslationVerbose]]
+        def self.variants: -> [OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_create_params.rbs b/sig/openai/models/beta/assistant_create_params.rbs
index 63ed4e29..21b76a8c 100644
--- a/sig/openai/models/beta/assistant_create_params.rbs
+++ b/sig/openai/models/beta/assistant_create_params.rbs
@@ -72,7 +72,7 @@ module OpenAI
       type model = String | OpenAI::Models::chat_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+        def self.variants: -> [String, OpenAI::Models::chat_model]
       end

       type tool_resources =
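A note on the sweeping change that runs through the rest of the sig files: every union's `private def self.variants`, which returned `[Symbol?, type]` pairs, becomes a public `def self.variants` returning the member types directly, with the tagged pairs preserved under `derefed_variants` (see base_model.rbs above). A rough sketch of what the new signatures describe at runtime; the return values shown are illustrative, not captured from the SDK's test suite:

```ruby
require "openai"

# Previously the tagged pairs were private:
#   Model.send(:variants) # => [[nil, String], [nil, OpenAI::Models::Audio::SpeechModel]]
# With this change, union introspection is public and untagged:
model_union = OpenAI::Models::Audio::SpeechCreateParams::Model
model_union.variants # => e.g. [String, OpenAI::Models::Audio::SpeechModel]
```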
@@ -253,7 +253,7 @@ module OpenAI
                 end
               end

-              private def self.variants: -> [[:auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]]
+              def self.variants: -> [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
             end
           end
         end
diff --git a/sig/openai/models/beta/assistant_response_format_option.rbs b/sig/openai/models/beta/assistant_response_format_option.rbs
index c9efaa33..158a3a03 100644
--- a/sig/openai/models/beta/assistant_response_format_option.rbs
+++ b/sig/openai/models/beta/assistant_response_format_option.rbs
@@ -8,7 +8,7 @@ module OpenAI
         | OpenAI::Models::ResponseFormatJSONSchema

       class AssistantResponseFormatOption < OpenAI::Union
-        private def self.variants: -> [[nil, :auto], [nil, OpenAI::Models::ResponseFormatText], [nil, OpenAI::Models::ResponseFormatJSONObject], [nil, OpenAI::Models::ResponseFormatJSONSchema]]
+        def self.variants: -> [:auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_stream_event.rbs b/sig/openai/models/beta/assistant_stream_event.rbs
index a22d0f87..37d03210 100644
--- a/sig/openai/models/beta/assistant_stream_event.rbs
+++ b/sig/openai/models/beta/assistant_stream_event.rbs
@@ -603,7 +603,7 @@ module OpenAI
         def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::error_event
       end

-      private def self.variants: -> [[:"thread.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated], [:"thread.run.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated], [:"thread.run.queued", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued], [:"thread.run.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress], [:"thread.run.requires_action", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction], [:"thread.run.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted], [:"thread.run.incomplete", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete], [:"thread.run.failed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed], [:"thread.run.cancelling", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling], [:"thread.run.cancelled", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled], [:"thread.run.expired", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired], [:"thread.run.step.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated], [:"thread.run.step.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress], [:"thread.run.step.delta", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta], [:"thread.run.step.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted], [:"thread.run.step.failed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed], [:"thread.run.step.cancelled", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled], [:"thread.run.step.expired", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired], [:"thread.message.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated], [:"thread.message.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress], [:"thread.message.delta", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta], [:"thread.message.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted], [:"thread.message.incomplete", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete], [:error, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent]]
+      def self.variants: -> [OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent]
     end
   end
 end
diff --git a/sig/openai/models/beta/assistant_tool.rbs b/sig/openai/models/beta/assistant_tool.rbs
index 054c260d..5421e7bc 100644
--- a/sig/openai/models/beta/assistant_tool.rbs
+++ b/sig/openai/models/beta/assistant_tool.rbs
@@ -7,7 +7,7 @@ module OpenAI
         | OpenAI::Models::Beta::FunctionTool

       class AssistantTool < OpenAI::Union
-        private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::FileSearchTool], [:function, OpenAI::Models::Beta::FunctionTool]]
+        def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_tool_choice_option.rbs b/sig/openai/models/beta/assistant_tool_choice_option.rbs
index e8243ffd..f7886116 100644
--- a/sig/openai/models/beta/assistant_tool_choice_option.rbs
+++ b/sig/openai/models/beta/assistant_tool_choice_option.rbs
@@ -16,7 +16,7 @@ module OpenAI
           def self.values: -> ::Array[OpenAI::Models::Beta::AssistantToolChoiceOption::auto]
         end

-        private def self.variants: -> [[nil, OpenAI::Models::Beta::AssistantToolChoiceOption::auto], [nil, OpenAI::Models::Beta::AssistantToolChoice]]
+        def self.variants: -> [OpenAI::Models::Beta::AssistantToolChoiceOption::auto, OpenAI::Models::Beta::AssistantToolChoice]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_update_params.rbs b/sig/openai/models/beta/assistant_update_params.rbs
index 8e5079c9..39a0d8e2 100644
--- a/sig/openai/models/beta/assistant_update_params.rbs
+++ b/sig/openai/models/beta/assistant_update_params.rbs
@@ -75,77 +75,68 @@ module OpenAI
       type model =
         String
-        | OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models
+        | :"o3-mini"
+        | :"o3-mini-2025-01-31"
+        | :o1
+        | :"o1-2024-12-17"
+        | :"gpt-4o"
+        | :"gpt-4o-2024-11-20"
+        | :"gpt-4o-2024-08-06"
+        | :"gpt-4o-2024-05-13"
+        | :"gpt-4o-mini"
+        | :"gpt-4o-mini-2024-07-18"
+        | :"gpt-4.5-preview"
+        | :"gpt-4.5-preview-2025-02-27"
+        | :"gpt-4-turbo"
+        | :"gpt-4-turbo-2024-04-09"
+        | :"gpt-4-0125-preview"
+        | :"gpt-4-turbo-preview"
+        | :"gpt-4-1106-preview"
+        | :"gpt-4-vision-preview"
+        | :"gpt-4"
+        | :"gpt-4-0314"
+        | :"gpt-4-0613"
+        | :"gpt-4-32k"
+        | :"gpt-4-32k-0314"
+        | :"gpt-4-32k-0613"
+        | :"gpt-3.5-turbo"
+        | :"gpt-3.5-turbo-16k"
+        | :"gpt-3.5-turbo-0613"
+        | :"gpt-3.5-turbo-1106"
+        | :"gpt-3.5-turbo-0125"
+        | :"gpt-3.5-turbo-16k-0613"

       class Model < OpenAI::Union
-        type assistant_supported_models =
-          :"o3-mini"
-          | :"o3-mini-2025-01-31"
-          | :o1
-          | :"o1-2024-12-17"
-          | :"gpt-4o"
-          | :"gpt-4o-2024-11-20"
-          | :"gpt-4o-2024-08-06"
-          | :"gpt-4o-2024-05-13"
-          | :"gpt-4o-mini"
-          | :"gpt-4o-mini-2024-07-18"
-          | :"gpt-4.5-preview"
-          | :"gpt-4.5-preview-2025-02-27"
-          | :"gpt-4-turbo"
-          | :"gpt-4-turbo-2024-04-09"
-          | :"gpt-4-0125-preview"
-          | :"gpt-4-turbo-preview"
-          | :"gpt-4-1106-preview"
-          | :"gpt-4-vision-preview"
-          | :"gpt-4"
-          | :"gpt-4-0314"
-          | :"gpt-4-0613"
-          | :"gpt-4-32k"
-          | :"gpt-4-32k-0314"
-          | :"gpt-4-32k-0613"
-          | :"gpt-3.5-turbo"
-          | :"gpt-3.5-turbo-16k"
-          | :"gpt-3.5-turbo-0613"
-          | :"gpt-3.5-turbo-1106"
-          | :"gpt-3.5-turbo-0125"
-          | :"gpt-3.5-turbo-16k-0613"
-
-        class AssistantSupportedModels < OpenAI::Enum
-          O3_MINI: :"o3-mini"
-          O3_MINI_2025_01_31: :"o3-mini-2025-01-31"
-          O1: :o1
-          O1_2024_12_17: :"o1-2024-12-17"
-          GPT_4O: :"gpt-4o"
-          GPT_4O_2024_11_20: :"gpt-4o-2024-11-20"
-          GPT_4O_2024_08_06: :"gpt-4o-2024-08-06"
-          GPT_4O_2024_05_13: :"gpt-4o-2024-05-13"
-          GPT_4O_MINI: :"gpt-4o-mini"
-          GPT_4O_MINI_2024_07_18: :"gpt-4o-mini-2024-07-18"
-          GPT_4_5_PREVIEW: :"gpt-4.5-preview"
-          GPT_4_5_PREVIEW_2025_02_27: :"gpt-4.5-preview-2025-02-27"
-          GPT_4_TURBO: :"gpt-4-turbo"
-          GPT_4_TURBO_2024_04_09: :"gpt-4-turbo-2024-04-09"
-          GPT_4_0125_PREVIEW: :"gpt-4-0125-preview"
-          GPT_4_TURBO_PREVIEW: :"gpt-4-turbo-preview"
-          GPT_4_1106_PREVIEW: :"gpt-4-1106-preview"
-          GPT_4_VISION_PREVIEW: :"gpt-4-vision-preview"
-          GPT_4: :"gpt-4"
-          GPT_4_0314: :"gpt-4-0314"
-          GPT_4_0613: :"gpt-4-0613"
-          GPT_4_32K: :"gpt-4-32k"
-          GPT_4_32K_0314: :"gpt-4-32k-0314"
-          GPT_4_32K_0613: :"gpt-4-32k-0613"
-          GPT_3_5_TURBO: :"gpt-3.5-turbo"
-          GPT_3_5_TURBO_16K: :"gpt-3.5-turbo-16k"
-          GPT_3_5_TURBO_0613: :"gpt-3.5-turbo-0613"
-          GPT_3_5_TURBO_1106: :"gpt-3.5-turbo-1106"
-          GPT_3_5_TURBO_0125: :"gpt-3.5-turbo-0125"
-          GPT_3_5_TURBO_16K_0613: :"gpt-3.5-turbo-16k-0613"
-
-          def self.values: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models]
-        end
-
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models]]
+        def self.variants: -> [String, (:"o3-mini"
+          | :"o3-mini-2025-01-31"
+          | :o1
+          | :"o1-2024-12-17"
+          | :"gpt-4o"
+          | :"gpt-4o-2024-11-20"
+          | :"gpt-4o-2024-08-06"
+          | :"gpt-4o-2024-05-13"
+          | :"gpt-4o-mini"
+          | :"gpt-4o-mini-2024-07-18"
+          | :"gpt-4.5-preview"
+          | :"gpt-4.5-preview-2025-02-27"
+          | :"gpt-4-turbo"
+          | :"gpt-4-turbo-2024-04-09"
+          | :"gpt-4-0125-preview"
+          | :"gpt-4-turbo-preview"
+          | :"gpt-4-1106-preview"
+          | :"gpt-4-vision-preview"
+          | :"gpt-4"
+          | :"gpt-4-0314"
+          | :"gpt-4-0613"
+          | :"gpt-4-32k"
+          | :"gpt-4-32k-0314"
+          | :"gpt-4-32k-0613"
+          | :"gpt-3.5-turbo"
+          | :"gpt-3.5-turbo-16k"
+          | :"gpt-3.5-turbo-0613"
+          | :"gpt-3.5-turbo-1106"
+          | :"gpt-3.5-turbo-0125"
+          | :"gpt-3.5-turbo-16k-0613")]
       end

       type tool_resources =
diff --git a/sig/openai/models/beta/message_stream_event.rbs b/sig/openai/models/beta/message_stream_event.rbs
index 376e54bc..d46abf27 100644
--- a/sig/openai/models/beta/message_stream_event.rbs
+++ b/sig/openai/models/beta/message_stream_event.rbs
@@ -129,7 +129,7 @@ module OpenAI
         def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_incomplete
       end

-      private def self.variants: -> [[:"thread.message.created", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated], [:"thread.message.in_progress", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress], [:"thread.message.delta", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta], [:"thread.message.completed", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted], [:"thread.message.incomplete", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete]]
+      def self.variants: -> [OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete]
     end
   end
 end
diff --git a/sig/openai/models/beta/run_step_stream_event.rbs b/sig/openai/models/beta/run_step_stream_event.rbs
index d8fa10c9..b0c1a0a3 100644
--- a/sig/openai/models/beta/run_step_stream_event.rbs
+++ b/sig/openai/models/beta/run_step_stream_event.rbs
@@ -179,7 +179,7 @@ module OpenAI
         def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_expired
       end

-      private def self.variants: -> [[:"thread.run.step.created", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated], [:"thread.run.step.in_progress", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress], [:"thread.run.step.delta", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta], [:"thread.run.step.completed", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted], [:"thread.run.step.failed", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed], [:"thread.run.step.cancelled", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled], [:"thread.run.step.expired", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired]]
+      def self.variants: -> [OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired]
     end
   end
 end
diff --git a/sig/openai/models/beta/run_stream_event.rbs b/sig/openai/models/beta/run_stream_event.rbs
index ae28ae5e..650253ce 100644
--- a/sig/openai/models/beta/run_stream_event.rbs
+++ b/sig/openai/models/beta/run_stream_event.rbs
@@ -254,7 +254,7 @@ module OpenAI
         def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_expired
       end

-      private def self.variants: -> [[:"thread.run.created", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated], [:"thread.run.queued", OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued], [:"thread.run.in_progress", OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress], [:"thread.run.requires_action", OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction], [:"thread.run.completed", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted], [:"thread.run.incomplete", OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete], [:"thread.run.failed", OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed], [:"thread.run.cancelling", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling], [:"thread.run.cancelled", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled], [:"thread.run.expired", OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired]]
+      def self.variants: -> [OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired]
     end
   end
 end
diff --git a/sig/openai/models/beta/thread_create_and_run_params.rbs b/sig/openai/models/beta/thread_create_and_run_params.rbs
index 0f7e5730..76bb1909 100644
--- a/sig/openai/models/beta/thread_create_and_run_params.rbs
+++ b/sig/openai/models/beta/thread_create_and_run_params.rbs
@@ -90,7 +90,7 @@ module OpenAI
       type model = String | OpenAI::Models::chat_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+        def self.variants: -> [String, OpenAI::Models::chat_model]
       end

       type thread =
@@ -165,7 +165,7 @@ module OpenAI
             MessageContentPartParamArray: message_content_part_param_array

-            private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]]
+            def self.variants: -> [String, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]
           end

           type role = :user | :assistant
@@ -226,7 +226,7 @@ module OpenAI
               def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::file_search
             end

-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch]]
+            def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch]
           end
         end
       end
@@ -409,7 +409,7 @@ module OpenAI
                 end
               end

-              private def self.variants: -> [[:auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]]
+              def self.variants: -> [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
             end
           end
         end
@@ -488,7 +488,7 @@ module OpenAI
         | OpenAI::Models::Beta::FunctionTool

       class Tool < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Beta::CodeInterpreterTool], [nil, OpenAI::Models::Beta::FileSearchTool], [nil, OpenAI::Models::Beta::FunctionTool]]
+        def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool]
       end

       type truncation_strategy =
diff --git a/sig/openai/models/beta/thread_create_params.rbs b/sig/openai/models/beta/thread_create_params.rbs
index cb189c09..8de821f1 100644
--- a/sig/openai/models/beta/thread_create_params.rbs
+++ b/sig/openai/models/beta/thread_create_params.rbs
@@ -77,7 +77,7 @@ module OpenAI
           MessageContentPartParamArray: message_content_part_param_array

-          private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]]
+          def self.variants: -> [String, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]
         end

         type role = :user | :assistant
@@ -138,7 +138,7 @@ module OpenAI
             def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::file_search
           end

-          private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch]]
+          def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch]
         end
       end
     end
@@ -321,7 +321,7 @@ module OpenAI
               end
             end

-            private def self.variants: -> [[:auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]]
+            def self.variants: -> [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/annotation.rbs b/sig/openai/models/beta/threads/annotation.rbs
index 547b90c2..527a6e58 100644
--- a/sig/openai/models/beta/threads/annotation.rbs
+++ b/sig/openai/models/beta/threads/annotation.rbs
@@ -7,7 +7,7 @@ module OpenAI
           | OpenAI::Models::Beta::Threads::FilePathAnnotation

         class Annotation < OpenAI::Union
-          private def self.variants: -> [[:file_citation, OpenAI::Models::Beta::Threads::FileCitationAnnotation], [:file_path, OpenAI::Models::Beta::Threads::FilePathAnnotation]]
+          def self.variants: -> [OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/annotation_delta.rbs b/sig/openai/models/beta/threads/annotation_delta.rbs
index 80ab4f91..21dee9df 100644
--- a/sig/openai/models/beta/threads/annotation_delta.rbs
+++ b/sig/openai/models/beta/threads/annotation_delta.rbs
@@ -7,7 +7,7 @@ module OpenAI
           | OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation

         class AnnotationDelta < OpenAI::Union
-          private def self.variants: -> [[:file_citation, OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation], [:file_path, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation]]
+          def self.variants: -> [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation]
         end
       end
     end
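The annotation unions just above (`Annotation`, `AnnotationDelta`) now advertise their member classes directly, which pairs naturally with Ruby pattern matching on returned values. A hedged sketch, assuming `annotation` is a value pulled from a thread message; only the class names come from the sig files, and the flow is illustrative:

```ruby
require "openai"

# `case/in` dispatches via `===`, so the model classes work as patterns.
def describe(annotation)
  case annotation
  in OpenAI::Models::Beta::Threads::FileCitationAnnotation
    "cites a file the assistant searched"
  in OpenAI::Models::Beta::Threads::FilePathAnnotation
    "points at a file the assistant generated"
  end
end
```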
diff --git a/sig/openai/models/beta/threads/message.rbs b/sig/openai/models/beta/threads/message.rbs
index f501c1d1..40d2be8d 100644
--- a/sig/openai/models/beta/threads/message.rbs
+++ b/sig/openai/models/beta/threads/message.rbs
@@ -122,7 +122,7 @@ module OpenAI
             def to_hash: -> OpenAI::Models::Beta::Threads::Message::Attachment::Tool::assistant_tools_file_search_type_only
           end

-          private def self.variants: -> [[nil, OpenAI::Models::Beta::CodeInterpreterTool], [nil, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly]]
+          def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly]
         end
       end
diff --git a/sig/openai/models/beta/threads/message_content.rbs b/sig/openai/models/beta/threads/message_content.rbs
index 970c2528..252bb7ff 100644
--- a/sig/openai/models/beta/threads/message_content.rbs
+++ b/sig/openai/models/beta/threads/message_content.rbs
@@ -9,7 +9,7 @@ module OpenAI
           | OpenAI::Models::Beta::Threads::RefusalContentBlock

         class MessageContent < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [:text, OpenAI::Models::Beta::Threads::TextContentBlock], [:refusal, OpenAI::Models::Beta::Threads::RefusalContentBlock]]
+          def self.variants: -> [OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_content_delta.rbs b/sig/openai/models/beta/threads/message_content_delta.rbs
index 88d80be8..aab10ba6 100644
--- a/sig/openai/models/beta/threads/message_content_delta.rbs
+++ b/sig/openai/models/beta/threads/message_content_delta.rbs
@@ -9,7 +9,7 @@ module OpenAI
           | OpenAI::Models::Beta::Threads::ImageURLDeltaBlock

         class MessageContentDelta < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileDeltaBlock], [:text, OpenAI::Models::Beta::Threads::TextDeltaBlock], [:refusal, OpenAI::Models::Beta::Threads::RefusalDeltaBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock]]
+          def self.variants: -> [OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_content_part_param.rbs b/sig/openai/models/beta/threads/message_content_part_param.rbs
index 0fa6c1af..39228e33 100644
--- a/sig/openai/models/beta/threads/message_content_part_param.rbs
+++ b/sig/openai/models/beta/threads/message_content_part_param.rbs
@@ -8,7 +8,7 @@ module OpenAI
           | OpenAI::Models::Beta::Threads::TextContentBlockParam

         class MessageContentPartParam < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [:text, OpenAI::Models::Beta::Threads::TextContentBlockParam]]
+          def self.variants: -> [OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_create_params.rbs b/sig/openai/models/beta/threads/message_create_params.rbs
index d9e425f5..9e3451c3 100644
--- a/sig/openai/models/beta/threads/message_create_params.rbs
+++ b/sig/openai/models/beta/threads/message_create_params.rbs
@@ -48,7 +48,7 @@ module OpenAI
           MessageContentPartParamArray: message_content_part_param_array

-          private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]]
+          def self.variants: -> [String, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]
         end

         type role = :user | :assistant
@@ -109,7 +109,7 @@ module OpenAI
             def to_hash: -> OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::file_search
           end

-          private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch]]
+          def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/run_create_params.rbs b/sig/openai/models/beta/threads/run_create_params.rbs
index eac12be2..09105c8f 100644
--- a/sig/openai/models/beta/threads/run_create_params.rbs
+++ b/sig/openai/models/beta/threads/run_create_params.rbs
@@ -137,7 +137,7 @@ module OpenAI
             MessageContentPartParamArray: message_content_part_param_array

-            private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]]
+            def self.variants: -> [String, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]
           end

           type role = :user | :assistant
@@ -198,7 +198,7 @@ module OpenAI
              def to_hash: -> OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::file_search
            end

-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch]]
+            def self.variants: -> [OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch]
           end
         end
       end
@@ -206,7 +206,7 @@ module OpenAI
         type model = String | OpenAI::Models::chat_model

         class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+          def self.variants: -> [String, OpenAI::Models::chat_model]
         end

         type truncation_strategy =
diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs
index 3c997331..41731b5a 100644
--- a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs
+++ b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs
@@ -114,7 +114,7 @@ module OpenAI
              end
            end

-            private def self.variants: -> [[:logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs], [:image, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs
index aeb01140..7a808c31 100644
--- a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs
@@ -74,7 +74,7 @@ module OpenAI
             | OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage

           class Output < OpenAI::Union
-            private def self.variants: -> [[:logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs], [:image, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/run_step.rbs b/sig/openai/models/beta/threads/runs/run_step.rbs
index 69a157aa..af3c7db7 100644
--- a/sig/openai/models/beta/threads/runs/run_step.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step.rbs
@@ -136,7 +136,7 @@ module OpenAI
             | OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails

           class StepDetails < OpenAI::Union
-            private def self.variants: -> [[:message_creation, OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails], [:tool_calls, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]
           end

           type type_ = :message_creation | :tool_calls
diff --git a/sig/openai/models/beta/threads/runs/run_step_delta.rbs b/sig/openai/models/beta/threads/runs/run_step_delta.rbs
index 2d78e758..9c5a49d3 100644
--- a/sig/openai/models/beta/threads/runs/run_step_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step_delta.rbs
@@ -34,7 +34,7 @@ module OpenAI
             | OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject

           class StepDetails < OpenAI::Union
-            private def self.variants: -> [[:message_creation, OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta], [:tool_calls, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/tool_call.rbs b/sig/openai/models/beta/threads/runs/tool_call.rbs
index e4de6ce0..42300b3c 100644
--- a/sig/openai/models/beta/threads/runs/tool_call.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_call.rbs
@@ -9,7 +9,7 @@ module OpenAI
             | OpenAI::Models::Beta::Threads::Runs::FunctionToolCall

           class ToolCall < OpenAI::Union
-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall], [:file_search, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall], [:function, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
index 49679360..6c3c0ec7 100644
--- a/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
@@ -9,7 +9,7 @@ module OpenAI
             | OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta

           class ToolCallDelta < OpenAI::Union
-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta], [:file_search, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta], [:function, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta]]
+            def self.variants: -> [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta]
           end
         end
       end
diff --git a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
index db1fee4b..f481adde 100644
--- a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
@@ -83,10 +83,10 @@ module OpenAI
           | OpenAI::Models::Chat::ChatCompletionContentPartRefusal

         class ArrayOfContentPart < OpenAI::Union
-          private def self.variants: -> [[:text, OpenAI::Models::Chat::ChatCompletionContentPartText], [:refusal, OpenAI::Models::Chat::ChatCompletionContentPartRefusal]]
+          def self.variants: -> [OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal]
         end

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]]
       end

       type function_call = { arguments: String, name: String }
diff --git a/sig/openai/models/chat/chat_completion_content_part.rbs b/sig/openai/models/chat/chat_completion_content_part.rbs
index f0459960..31333ab5 100644
--- a/sig/openai/models/chat/chat_completion_content_part.rbs
+++ b/sig/openai/models/chat/chat_completion_content_part.rbs
@@ -60,7 +60,7 @@ module OpenAI
          end
        end

-        private def self.variants: -> [[:text, OpenAI::Models::Chat::ChatCompletionContentPartText], [:image_url, OpenAI::Models::Chat::ChatCompletionContentPartImage], [:input_audio, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio], [:file, OpenAI::Models::Chat::ChatCompletionContentPart::File]]
+        def self.variants: -> [OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_developer_message_param.rbs b/sig/openai/models/chat/chat_completion_developer_message_param.rbs
index e528c37a..bcc12871 100644
--- a/sig/openai/models/chat/chat_completion_developer_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_developer_message_param.rbs
@@ -42,7 +42,7 @@ module OpenAI
         ChatCompletionContentPartTextArray: chat_completion_content_part_text_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_message_param.rbs b/sig/openai/models/chat/chat_completion_message_param.rbs
index 771ad5ab..a42d88ca 100644
--- a/sig/openai/models/chat/chat_completion_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_message_param.rbs
@@ -13,7 +13,7 @@ module OpenAI
         | OpenAI::Models::Chat::ChatCompletionFunctionMessageParam

       class ChatCompletionMessageParam < OpenAI::Union
-        private def self.variants: -> [[:developer, OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam], [:system, OpenAI::Models::Chat::ChatCompletionSystemMessageParam], [:user, OpenAI::Models::Chat::ChatCompletionUserMessageParam], [:assistant, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam], [:tool, OpenAI::Models::Chat::ChatCompletionToolMessageParam], [:function, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam]]
+        def self.variants: -> [OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_prediction_content.rbs b/sig/openai/models/chat/chat_completion_prediction_content.rbs
index 4bd18cc2..c04d1cc4 100644
--- a/sig/openai/models/chat/chat_completion_prediction_content.rbs
+++ b/sig/openai/models/chat/chat_completion_prediction_content.rbs
@@ -36,7 +36,7 @@ module OpenAI
         ChatCompletionContentPartTextArray: chat_completion_content_part_text_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_system_message_param.rbs b/sig/openai/models/chat/chat_completion_system_message_param.rbs
index e5dc4d84..7501e229 100644
--- a/sig/openai/models/chat/chat_completion_system_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_system_message_param.rbs
@@ -42,7 +42,7 @@ module OpenAI
         ChatCompletionContentPartTextArray: chat_completion_content_part_text_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs
index d5d57aef..e6c246ee 100644
--- a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs
+++ b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs
@@ -19,7 +19,7 @@ module OpenAI
           def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto]
         end

-        private def self.variants: -> [[nil, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto], [nil, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]]
+        def self.variants: -> [OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]
       end
     end
   end
diff --git a/sig/openai/models/chat/chat_completion_tool_message_param.rbs b/sig/openai/models/chat/chat_completion_tool_message_param.rbs
index 5082b0dd..90f917db 100644
--- a/sig/openai/models/chat/chat_completion_tool_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_tool_message_param.rbs
@@ -40,7 +40,7 @@ module OpenAI
         ChatCompletionContentPartTextArray: chat_completion_content_part_text_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]
       end
     end
   end
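A recurring shape in the chat message params above: each `Content` type is a union of a bare `String` and an array of content parts, and the rewritten `variants` lines now state that directly. A usage sketch; the hash keys follow the documented chat API shapes, though the specific values here are made up:

```ruby
# Both of these satisfy the String | ::Array[...] content union:
simple = {role: :user, content: "Describe this image"}

parts = {
  role: :user,
  content: [
    {type: :text, text: "Describe this image"},
    {type: :image_url, image_url: {url: "https://example.com/cat.png"}}
  ]
}
```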
diff --git a/sig/openai/models/chat/chat_completion_user_message_param.rbs b/sig/openai/models/chat/chat_completion_user_message_param.rbs
index f9b825b4..75491108 100644
--- a/sig/openai/models/chat/chat_completion_user_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_user_message_param.rbs
@@ -42,7 +42,7 @@ module OpenAI
         ChatCompletionContentPartArray: chat_completion_content_part_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::chat_completion_content_part]]]
+        def self.variants: -> [String, ::Array[OpenAI::Models::Chat::chat_completion_content_part]]
       end
     end
   end
diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs
index c32fa792..17fd54d4 100644
--- a/sig/openai/models/chat/completion_create_params.rbs
+++ b/sig/openai/models/chat/completion_create_params.rbs
@@ -172,7 +172,7 @@ module OpenAI
       type model = String | OpenAI::Models::chat_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+        def self.variants: -> [String, OpenAI::Models::chat_model]
       end

       type function_call =
@@ -189,7 +189,7 @@ module OpenAI
           def self.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode]
         end

-        private def self.variants: -> [[nil, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode], [nil, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]]
+        def self.variants: -> [OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]
       end

       type function =
@@ -241,7 +241,7 @@ module OpenAI
         | OpenAI::Models::ResponseFormatJSONObject

       class ResponseFormat < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::ResponseFormatText], [nil, OpenAI::Models::ResponseFormatJSONSchema], [nil, OpenAI::Models::ResponseFormatJSONObject]]
+        def self.variants: -> [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject]
       end

       type service_tier = :auto | :default
@@ -260,7 +260,7 @@ module OpenAI
         StringArray: string_array

-        private def self.variants: -> [[nil, String], [nil, ::Array[String]]]
+        def self.variants: -> [String, ::Array[String]]
       end

       type web_search_options =
diff --git a/sig/openai/models/comparison_filter.rbs b/sig/openai/models/comparison_filter.rbs
index 435d3cc2..02a4b26d 100644
--- a/sig/openai/models/comparison_filter.rbs
+++ b/sig/openai/models/comparison_filter.rbs
@@ -40,7 +40,7 @@ module OpenAI
       type value = String | Float | bool

       class Value < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]]
+        def self.variants: -> [String, Float, bool]
       end
     end
   end
diff --git a/sig/openai/models/completion_create_params.rbs b/sig/openai/models/completion_create_params.rbs
index 362d945c..131c7bd7 100644
--- a/sig/openai/models/completion_create_params.rbs
+++ b/sig/openai/models/completion_create_params.rbs
@@ -90,21 +90,12 @@ module OpenAI
     def to_hash: -> OpenAI::Models::completion_create_params

     type model =
-      String | OpenAI::Models::CompletionCreateParams::Model::preset
+      String | :"gpt-3.5-turbo-instruct" | :"davinci-002" | :"babbage-002"

     class Model < OpenAI::Union
-      type preset =
-        :"gpt-3.5-turbo-instruct" | :"davinci-002" | :"babbage-002"
-
-      class Preset < OpenAI::Enum
-        GPT_3_5_TURBO_INSTRUCT: :"gpt-3.5-turbo-instruct"
-        DAVINCI_002: :"davinci-002"
-        BABBAGE_002: :"babbage-002"
-
-        def self.values: -> ::Array[OpenAI::Models::CompletionCreateParams::Model::preset]
-      end
-
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::CompletionCreateParams::Model::preset]]
+      def self.variants: -> [String, (:"gpt-3.5-turbo-instruct"
+        | :"davinci-002"
+        | :"babbage-002")]
     end

     type prompt =
@@ -123,7 +114,7 @@ module OpenAI
       ArrayOfToken2DArray: array_of_token2_d_array

-      private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[Integer]], [nil, ::Array[::Array[Integer]]]]
+      def self.variants: -> [String, ::Array[String], ::Array[Integer], ::Array[::Array[Integer]]]
     end

     type stop = (String | ::Array[String])?
@@ -133,7 +124,7 @@ module OpenAI
       StringArray: string_array

-      private def self.variants: -> [[nil, String], [nil, ::Array[String]]]
+      def self.variants: -> [String, ::Array[String]]
     end
   end
 end
diff --git a/sig/openai/models/compound_filter.rbs b/sig/openai/models/compound_filter.rbs
index 620ab14b..ddbe75e7 100644
--- a/sig/openai/models/compound_filter.rbs
+++ b/sig/openai/models/compound_filter.rbs
@@ -23,7 +23,7 @@ module OpenAI
       type filter = OpenAI::Models::ComparisonFilter | top

       class Filter < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, top]]
+        def self.variants: -> [OpenAI::Models::ComparisonFilter, top]
       end

       type type_ = :and | :or
diff --git a/sig/openai/models/embedding_create_params.rbs b/sig/openai/models/embedding_create_params.rbs
index fdddf90e..4e32f5ba 100644
--- a/sig/openai/models/embedding_create_params.rbs
+++ b/sig/openai/models/embedding_create_params.rbs
@@ -63,13 +63,13 @@ module OpenAI
       ArrayOfToken2DArray: array_of_token2_d_array

-      private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[Integer]], [nil, ::Array[::Array[Integer]]]]
+      def self.variants: -> [String, ::Array[String], ::Array[Integer], ::Array[::Array[Integer]]]
     end

     type model = String | OpenAI::Models::embedding_model

     class Model < OpenAI::Union
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::embedding_model]]
+      def self.variants: -> [String, OpenAI::Models::embedding_model]
     end

     type encoding_format = :float | :base64
diff --git a/sig/openai/models/file_chunking_strategy.rbs b/sig/openai/models/file_chunking_strategy.rbs
index 48e1a062..d287b675 100644
--- a/sig/openai/models/file_chunking_strategy.rbs
+++ b/sig/openai/models/file_chunking_strategy.rbs
@@ -5,7 +5,7 @@ module OpenAI
       | OpenAI::Models::OtherFileChunkingStrategyObject

     class FileChunkingStrategy < OpenAI::Union
-      private def self.variants: -> [[:static, OpenAI::Models::StaticFileChunkingStrategyObject], [:other, OpenAI::Models::OtherFileChunkingStrategyObject]]
+      def self.variants: -> [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject]
     end
   end
 end
diff --git a/sig/openai/models/file_chunking_strategy_param.rbs b/sig/openai/models/file_chunking_strategy_param.rbs
index 85961a3f..f5f9f28a 100644
--- a/sig/openai/models/file_chunking_strategy_param.rbs
+++ b/sig/openai/models/file_chunking_strategy_param.rbs
@@ -5,7 +5,7 @@ module OpenAI
       | OpenAI::Models::StaticFileChunkingStrategyObjectParam

     class FileChunkingStrategyParam < OpenAI::Union
-      private def self.variants: -> [[:auto, OpenAI::Models::AutoFileChunkingStrategyParam], [:static, OpenAI::Models::StaticFileChunkingStrategyObjectParam]]
+      def self.variants: -> [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam]
     end
   end
 end
diff --git a/sig/openai/models/fine_tuning/fine_tuning_job.rbs b/sig/openai/models/fine_tuning/fine_tuning_job.rbs
index 44ab2010..12dfc6a9 100644
--- a/sig/openai/models/fine_tuning/fine_tuning_job.rbs
+++ b/sig/openai/models/fine_tuning/fine_tuning_job.rbs
@@ -160,19 +160,19 @@ module OpenAI
         type batch_size = :auto | Integer

         class BatchSize < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Integer]]
+          def self.variants: -> [:auto, Integer]
         end

         type learning_rate_multiplier = :auto | Float

         class LearningRateMultiplier < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Float]]
+          def self.variants: -> [:auto, Float]
         end

         type n_epochs = :auto | Integer

         class NEpochs < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Integer]]
+          def self.variants: -> [:auto, Integer]
         end
       end
@@ -307,25 +307,25 @@ module OpenAI
           type batch_size = :auto | Integer

           class BatchSize < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end

           type beta = :auto | Float

           class Beta < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type learning_rate_multiplier = :auto | Float

           class LearningRateMultiplier < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type n_epochs = :auto | Integer

           class NEpochs < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end
         end
       end
@@ -395,19 +395,19 @@ module OpenAI
           type batch_size = :auto | Integer

           class BatchSize < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end

           type learning_rate_multiplier = :auto | Float

           class LearningRateMultiplier < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type n_epochs = :auto | Integer

           class NEpochs < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end
         end
       end
diff --git a/sig/openai/models/fine_tuning/job_create_params.rbs b/sig/openai/models/fine_tuning/job_create_params.rbs
index b8be5ee1..468dee15 100644
--- a/sig/openai/models/fine_tuning/job_create_params.rbs
+++ b/sig/openai/models/fine_tuning/job_create_params.rbs
@@ -66,22 +66,17 @@ module OpenAI
       def to_hash: -> OpenAI::Models::FineTuning::job_create_params

       type model =
-        String | OpenAI::Models::FineTuning::JobCreateParams::Model::preset
+        String
+        | :"babbage-002"
+        | :"davinci-002"
+        | :"gpt-3.5-turbo"
+        | :"gpt-4o-mini"

       class Model < OpenAI::Union
-        type preset =
-          :"babbage-002" | :"davinci-002" | :"gpt-3.5-turbo" | :"gpt-4o-mini"
-
-        class Preset < OpenAI::Enum
-          BABBAGE_002: :"babbage-002"
-          DAVINCI_002: :"davinci-002"
-          GPT_3_5_TURBO: :"gpt-3.5-turbo"
-          GPT_4O_MINI: :"gpt-4o-mini"
-
-          def self.values: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Model::preset]
-        end
-
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::FineTuning::JobCreateParams::Model::preset]]
+        def self.variants: -> [String, (:"babbage-002"
+          | :"davinci-002"
+          | :"gpt-3.5-turbo"
+          | :"gpt-4o-mini")]
       end

       type hyperparameters =
@@ -126,19 +121,19 @@ module OpenAI
         type batch_size = :auto | Integer

         class BatchSize < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Integer]]
+          def self.variants: -> [:auto, Integer]
         end

         type learning_rate_multiplier = :auto | Float

         class LearningRateMultiplier < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Float]]
+          def self.variants: -> [:auto, Float]
         end

         type n_epochs = :auto | Integer

         class NEpochs < OpenAI::Union
-          private def self.variants: -> [[nil, :auto], [nil, Integer]]
+          def self.variants: -> [:auto, Integer]
         end
       end
@@ -312,25 +307,25 @@ module OpenAI
           type batch_size = :auto | Integer

           class BatchSize < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end

           type beta = :auto | Float

           class Beta < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type learning_rate_multiplier = :auto | Float

           class LearningRateMultiplier < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type n_epochs = :auto | Integer

           class NEpochs < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end
         end
       end
@@ -400,19 +395,19 @@ module OpenAI
           type batch_size = :auto | Integer

           class BatchSize < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end

           type learning_rate_multiplier = :auto | Float

           class LearningRateMultiplier < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Float]]
+            def self.variants: -> [:auto, Float]
           end

           type n_epochs = :auto | Integer

           class NEpochs < OpenAI::Union
-            private def self.variants: -> [[nil, :auto], [nil, Integer]]
+            def self.variants: -> [:auto, Integer]
           end
         end
       end
diff --git a/sig/openai/models/image_create_variation_params.rbs b/sig/openai/models/image_create_variation_params.rbs
index 983f3cea..2cb6174c 100644
--- a/sig/openai/models/image_create_variation_params.rbs
+++ b/sig/openai/models/image_create_variation_params.rbs
@@ -49,7 +49,7 @@ module OpenAI
     type model = String | OpenAI::Models::image_model

     class Model < OpenAI::Union
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]]
+      def self.variants: -> [String, OpenAI::Models::image_model]
     end

     type response_format = :url | :b64_json
diff --git a/sig/openai/models/image_edit_params.rbs b/sig/openai/models/image_edit_params.rbs
index e791162c..17f6d2a5 100644
--- a/sig/openai/models/image_edit_params.rbs
+++ b/sig/openai/models/image_edit_params.rbs
@@ -56,7 +56,7 @@ module OpenAI
     type model = String | OpenAI::Models::image_model

     class Model < OpenAI::Union
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]]
+      def self.variants: -> [String, OpenAI::Models::image_model]
     end

     type response_format = :url | :b64_json
diff --git a/sig/openai/models/image_generate_params.rbs b/sig/openai/models/image_generate_params.rbs
index a91c05d0..ebeabef9 100644
--- a/sig/openai/models/image_generate_params.rbs
+++ b/sig/openai/models/image_generate_params.rbs
@@ -60,7 +60,7 @@ module OpenAI
     type model = String | OpenAI::Models::image_model

     class Model < OpenAI::Union
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]]
+      def self.variants: -> [String, OpenAI::Models::image_model]
     end

     type quality = :standard | :hd
diff --git a/sig/openai/models/moderation_create_params.rbs b/sig/openai/models/moderation_create_params.rbs
index 0ea15edd..e5c23167 100644
--- a/sig/openai/models/moderation_create_params.rbs
+++ b/sig/openai/models/moderation_create_params.rbs
@@ -46,13 +46,13 @@ module OpenAI
       ModerationMultiModalInputArray: moderation_multi_modal_input_array

-      private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[OpenAI::Models::moderation_multi_modal_input]]]
+      def self.variants: -> [String, ::Array[String], ::Array[OpenAI::Models::moderation_multi_modal_input]]
     end

     type model = String | OpenAI::Models::moderation_model

     class Model < OpenAI::Union
-      private def self.variants: -> [[nil, String], [nil, OpenAI::Models::moderation_model]]
+      def self.variants: -> [String, OpenAI::Models::moderation_model]
     end
   end
 end
diff --git a/sig/openai/models/moderation_multi_modal_input.rbs b/sig/openai/models/moderation_multi_modal_input.rbs
index af2b3a37..c98cd3a4 100644
--- a/sig/openai/models/moderation_multi_modal_input.rbs
+++ b/sig/openai/models/moderation_multi_modal_input.rbs
@@ -5,7 +5,7 @@ module OpenAI
       | OpenAI::Models::ModerationTextInput

     class ModerationMultiModalInput < OpenAI::Union
-      private def self.variants: -> [[:image_url, OpenAI::Models::ModerationImageURLInput], [:text, OpenAI::Models::ModerationTextInput]]
+      def self.variants: -> [OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput]
     end
   end
 end
diff --git a/sig/openai/models/responses/easy_input_message.rbs b/sig/openai/models/responses/easy_input_message.rbs
index 4f7d7d6b..7347e123 100644
--- a/sig/openai/models/responses/easy_input_message.rbs
+++ b/sig/openai/models/responses/easy_input_message.rbs
@@ -37,7 +37,7 @@ module OpenAI
         | OpenAI::Models::Responses::response_input_message_content_list

       class Content < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Responses::response_input_message_content_list]]
+        def self.variants: -> [String, OpenAI::Models::Responses::response_input_message_content_list]
       end

       type role = :user | :assistant | :system | :developer
diff --git a/sig/openai/models/responses/file_search_tool.rbs b/sig/openai/models/responses/file_search_tool.rbs
index 2c1ab9da..32c08269 100644
--- a/sig/openai/models/responses/file_search_tool.rbs
+++ b/sig/openai/models/responses/file_search_tool.rbs
@@ -50,7 +50,7 @@ module OpenAI
         OpenAI::Models::ComparisonFilter | OpenAI::Models::CompoundFilter

       class Filters < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, OpenAI::Models::CompoundFilter]]
+        def self.variants: -> [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter]
       end

       type ranking_options =
diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs
index 7bee86f9..2fa4632c 100644
--- a/sig/openai/models/responses/response.rbs
+++ b/sig/openai/models/responses/response.rbs
@@ -153,7 +153,7 @@ module OpenAI
       type model = String | OpenAI::Models::chat_model

       class Model < OpenAI::Union
-        private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+        def self.variants: -> [String, OpenAI::Models::chat_model]
       end

       type tool_choice =
@@ -162,7 +162,7 @@ module OpenAI
         | OpenAI::Models::Responses::ToolChoiceFunction

       class ToolChoice < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Responses::tool_choice_options], [nil, OpenAI::Models::Responses::ToolChoiceTypes], [nil, OpenAI::Models::Responses::ToolChoiceFunction]]
+        def self.variants: -> [OpenAI::Models::Responses::tool_choice_options, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction]
       end

       type truncation = :auto | :disabled
diff --git a/sig/openai/models/responses/response_code_interpreter_tool_call.rbs b/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
index 4f0b66ba..f57c376a 100644
--- a/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
@@ -99,7 +99,7 @@ module OpenAI
          end
        end

-        private def self.variants: -> [[:logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs], [:files, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files]]
+        def self.variants: -> [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files]
       end

       type status = :in_progress | :interpreting | :completed
diff --git a/sig/openai/models/responses/response_computer_tool_call.rbs b/sig/openai/models/responses/response_computer_tool_call.rbs
index 49069bb3..7cc62b09 100644
--- a/sig/openai/models/responses/response_computer_tool_call.rbs
+++ b/sig/openai/models/responses/response_computer_tool_call.rbs
@@ -275,7 +275,7 @@ module OpenAI
           def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::wait
         end

-        private def self.variants: -> [[:click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click], [:double_click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick], [:drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag], [:keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress], [:move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move], [:screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot], [:scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll], [:type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type], [:wait, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]]
+        def self.variants: -> [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]
       end

       type pending_safety_check =
diff --git a/sig/openai/models/responses/response_content.rbs b/sig/openai/models/responses/response_content.rbs
index 246a0708..c7bb377e 100644
--- a/sig/openai/models/responses/response_content.rbs
+++ b/sig/openai/models/responses/response_content.rbs
@@ -9,7 +9,7 @@ module OpenAI
         | OpenAI::Models::Responses::ResponseOutputRefusal

       class ResponseContent < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Responses::ResponseInputText], [nil, OpenAI::Models::Responses::ResponseInputImage], [nil, OpenAI::Models::Responses::ResponseInputFile], [nil, OpenAI::Models::Responses::ResponseOutputText], [nil, OpenAI::Models::Responses::ResponseOutputRefusal]]
+        def self.variants: -> [OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_content_part_added_event.rbs b/sig/openai/models/responses/response_content_part_added_event.rbs
index 37deb50b..5bbbdac3 100644
---
a/sig/openai/models/responses/response_content_part_added_event.rbs +++ b/sig/openai/models/responses/response_content_part_added_event.rbs @@ -41,7 +41,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseOutputRefusal class Part < OpenAI::Union - private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]] + def self.variants: -> [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] end end end diff --git a/sig/openai/models/responses/response_content_part_done_event.rbs b/sig/openai/models/responses/response_content_part_done_event.rbs index d8830b30..b915c001 100644 --- a/sig/openai/models/responses/response_content_part_done_event.rbs +++ b/sig/openai/models/responses/response_content_part_done_event.rbs @@ -41,7 +41,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseOutputRefusal class Part < OpenAI::Union - private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]] + def self.variants: -> [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] end end end diff --git a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs index 785ee05f..d9049e41 100644 --- a/sig/openai/models/responses/response_create_params.rbs +++ b/sig/openai/models/responses/response_create_params.rbs @@ -106,13 +106,13 @@ module OpenAI type input = String | OpenAI::Models::Responses::response_input class Input < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Responses::response_input]] + def self.variants: -> [String, OpenAI::Models::Responses::response_input] end type model = String | OpenAI::Models::chat_model class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]] + def self.variants: -> [String, OpenAI::Models::chat_model] end type tool_choice = @@ -121,7 +121,7 @@ module OpenAI | OpenAI::Models::Responses::ToolChoiceFunction class ToolChoice < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::Responses::tool_choice_options], [nil, OpenAI::Models::Responses::ToolChoiceTypes], [nil, OpenAI::Models::Responses::ToolChoiceFunction]] + def self.variants: -> [OpenAI::Models::Responses::tool_choice_options, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] end type truncation = :auto | :disabled diff --git a/sig/openai/models/responses/response_file_search_tool_call.rbs b/sig/openai/models/responses/response_file_search_tool_call.rbs index bff8946c..42d71f9c 100644 --- a/sig/openai/models/responses/response_file_search_tool_call.rbs +++ b/sig/openai/models/responses/response_file_search_tool_call.rbs @@ -95,7 +95,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end end end diff --git a/sig/openai/models/responses/response_format_text_config.rbs b/sig/openai/models/responses/response_format_text_config.rbs index bdd7473c..7a38cb8e 100644 --- a/sig/openai/models/responses/response_format_text_config.rbs +++ b/sig/openai/models/responses/response_format_text_config.rbs @@ -7,7 +7,7 @@ module OpenAI | OpenAI::Models::ResponseFormatJSONObject class ResponseFormatTextConfig 
< OpenAI::Union - private def self.variants: -> [[:text, OpenAI::Models::ResponseFormatText], [:json_schema, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig], [:json_object, OpenAI::Models::ResponseFormatJSONObject]] + def self.variants: -> [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] end end end diff --git a/sig/openai/models/responses/response_input_content.rbs b/sig/openai/models/responses/response_input_content.rbs index d12edcd5..004cfa5c 100644 --- a/sig/openai/models/responses/response_input_content.rbs +++ b/sig/openai/models/responses/response_input_content.rbs @@ -7,7 +7,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseInputFile class ResponseInputContent < OpenAI::Union - private def self.variants: -> [[:input_text, OpenAI::Models::Responses::ResponseInputText], [:input_image, OpenAI::Models::Responses::ResponseInputImage], [:input_file, OpenAI::Models::Responses::ResponseInputFile]] + def self.variants: -> [OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile] end end end diff --git a/sig/openai/models/responses/response_input_item.rbs b/sig/openai/models/responses/response_input_item.rbs index 6f23ae1b..b60c6d6a 100644 --- a/sig/openai/models/responses/response_input_item.rbs +++ b/sig/openai/models/responses/response_input_item.rbs @@ -260,7 +260,7 @@ module OpenAI def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::item_reference end - private def self.variants: -> [[:message, OpenAI::Models::Responses::EasyInputMessage], [:message, OpenAI::Models::Responses::ResponseInputItem::Message], [:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:computer_call_output, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:function_call_output, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput], [:reasoning, OpenAI::Models::Responses::ResponseReasoningItem], [:item_reference, OpenAI::Models::Responses::ResponseInputItem::ItemReference]] + def self.variants: -> [OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ItemReference] end end end diff --git a/sig/openai/models/responses/response_item_list.rbs b/sig/openai/models/responses/response_item_list.rbs index faf8e1a5..a8d0d3fc 100644 --- a/sig/openai/models/responses/response_item_list.rbs +++ b/sig/openai/models/responses/response_item_list.rbs @@ -278,7 +278,7 @@ module OpenAI end end - private def self.variants: -> [[:message, OpenAI::Models::Responses::ResponseItemList::Data::Message], [:message, OpenAI::Models::Responses::ResponseOutputMessage], 
[:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:computer_call_output, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:function_call_output, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput]] + def self.variants: -> [OpenAI::Models::Responses::ResponseItemList::Data::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput] end end end diff --git a/sig/openai/models/responses/response_output_item.rbs b/sig/openai/models/responses/response_output_item.rbs index cb441e32..e01fe1b2 100644 --- a/sig/openai/models/responses/response_output_item.rbs +++ b/sig/openai/models/responses/response_output_item.rbs @@ -10,7 +10,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseReasoningItem class ResponseOutputItem < OpenAI::Union - private def self.variants: -> [[:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:reasoning, OpenAI::Models::Responses::ResponseReasoningItem]] + def self.variants: -> [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem] end end end diff --git a/sig/openai/models/responses/response_output_message.rbs b/sig/openai/models/responses/response_output_message.rbs index dc20c3b5..c1bbed4a 100644 --- a/sig/openai/models/responses/response_output_message.rbs +++ b/sig/openai/models/responses/response_output_message.rbs @@ -41,7 +41,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseOutputRefusal class Content < OpenAI::Union - private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]] + def self.variants: -> [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] end type status = :in_progress | :completed | :incomplete diff --git a/sig/openai/models/responses/response_output_text.rbs b/sig/openai/models/responses/response_output_text.rbs index 6b128276..f8da61b0 100644 --- a/sig/openai/models/responses/response_output_text.rbs +++ b/sig/openai/models/responses/response_output_text.rbs @@ -109,7 +109,7 @@ module OpenAI def to_hash: -> OpenAI::Models::Responses::ResponseOutputText::Annotation::file_path end - private def self.variants: -> [[:file_citation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation], [:url_citation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation], [:file_path, 
OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath]] + def self.variants: -> [OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath] end end end diff --git a/sig/openai/models/responses/response_stream_event.rbs b/sig/openai/models/responses/response_stream_event.rbs index 10d54108..0d48dfd6 100644 --- a/sig/openai/models/responses/response_stream_event.rbs +++ b/sig/openai/models/responses/response_stream_event.rbs @@ -36,7 +36,7 @@ module OpenAI | OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent class ResponseStreamEvent < OpenAI::Union - private def self.variants: -> [[:"response.audio.delta", OpenAI::Models::Responses::ResponseAudioDeltaEvent], [:"response.audio.done", OpenAI::Models::Responses::ResponseAudioDoneEvent], [:"response.audio.transcript.delta", OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent], [:"response.audio.transcript.done", OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent], [:"response.code_interpreter_call.code.delta", OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent], [:"response.code_interpreter_call.code.done", OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent], [:"response.code_interpreter_call.completed", OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent], [:"response.code_interpreter_call.in_progress", OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent], [:"response.code_interpreter_call.interpreting", OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent], [:"response.completed", OpenAI::Models::Responses::ResponseCompletedEvent], [:"response.content_part.added", OpenAI::Models::Responses::ResponseContentPartAddedEvent], [:"response.content_part.done", OpenAI::Models::Responses::ResponseContentPartDoneEvent], [:"response.created", OpenAI::Models::Responses::ResponseCreatedEvent], [:error, OpenAI::Models::Responses::ResponseErrorEvent], [:"response.file_search_call.completed", OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent], [:"response.file_search_call.in_progress", OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent], [:"response.file_search_call.searching", OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent], [:"response.function_call_arguments.delta", OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent], [:"response.function_call_arguments.done", OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent], [:"response.in_progress", OpenAI::Models::Responses::ResponseInProgressEvent], [:"response.failed", OpenAI::Models::Responses::ResponseFailedEvent], [:"response.incomplete", OpenAI::Models::Responses::ResponseIncompleteEvent], [:"response.output_item.added", OpenAI::Models::Responses::ResponseOutputItemAddedEvent], [:"response.output_item.done", OpenAI::Models::Responses::ResponseOutputItemDoneEvent], [:"response.refusal.delta", OpenAI::Models::Responses::ResponseRefusalDeltaEvent], [:"response.refusal.done", OpenAI::Models::Responses::ResponseRefusalDoneEvent], [:"response.output_text.annotation.added", OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent], [:"response.output_text.delta", OpenAI::Models::Responses::ResponseTextDeltaEvent], [:"response.output_text.done", OpenAI::Models::Responses::ResponseTextDoneEvent], [:"response.web_search_call.completed", 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent], [:"response.web_search_call.in_progress", OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent], [:"response.web_search_call.searching", OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent]] + def self.variants: -> [OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent] end end end diff --git a/sig/openai/models/responses/response_text_annotation_delta_event.rbs b/sig/openai/models/responses/response_text_annotation_delta_event.rbs index f3f165b9..9030b65e 100644 --- a/sig/openai/models/responses/response_text_annotation_delta_event.rbs +++ b/sig/openai/models/responses/response_text_annotation_delta_event.rbs @@ -121,7 +121,7 @@ module OpenAI def to_hash: -> OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::file_path end - private def self.variants: -> [[:file_citation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation], [:url_citation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation], [:file_path, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath]] + def self.variants: -> [OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath] end end end diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs index 62a0dd21..642f7196 100644 --- a/sig/openai/models/responses/tool.rbs +++ b/sig/openai/models/responses/tool.rbs @@ -8,7 +8,7 @@ module OpenAI 
| OpenAI::Models::Responses::WebSearchTool class Tool < OpenAI::Union - private def self.variants: -> [[:file_search, OpenAI::Models::Responses::FileSearchTool], [:function, OpenAI::Models::Responses::FunctionTool], [:computer_use_preview, OpenAI::Models::Responses::ComputerTool], [nil, OpenAI::Models::Responses::WebSearchTool]] + def self.variants: -> [OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::WebSearchTool] end end end diff --git a/sig/openai/models/vector_store_search_params.rbs b/sig/openai/models/vector_store_search_params.rbs index 3f179fa9..2ac6032b 100644 --- a/sig/openai/models/vector_store_search_params.rbs +++ b/sig/openai/models/vector_store_search_params.rbs @@ -58,14 +58,14 @@ module OpenAI StringArray: string_array - private def self.variants: -> [[nil, String], [nil, ::Array[String]]] + def self.variants: -> [String, ::Array[String]] end type filters = OpenAI::Models::ComparisonFilter | OpenAI::Models::CompoundFilter class Filters < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, OpenAI::Models::CompoundFilter]] + def self.variants: -> [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] end type ranking_options = diff --git a/sig/openai/models/vector_store_search_response.rbs b/sig/openai/models/vector_store_search_response.rbs index f9014f12..9207fde5 100644 --- a/sig/openai/models/vector_store_search_response.rbs +++ b/sig/openai/models/vector_store_search_response.rbs @@ -37,7 +37,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end type content = diff --git a/sig/openai/models/vector_stores/file_batch_create_params.rbs b/sig/openai/models/vector_stores/file_batch_create_params.rbs index 394219fe..963a641f 100644 --- a/sig/openai/models/vector_stores/file_batch_create_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_create_params.rbs @@ -40,7 +40,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end end end diff --git a/sig/openai/models/vector_stores/file_create_params.rbs b/sig/openai/models/vector_stores/file_create_params.rbs index 52eb4cfe..471cc8a7 100644 --- a/sig/openai/models/vector_stores/file_create_params.rbs +++ b/sig/openai/models/vector_stores/file_create_params.rbs @@ -40,7 +40,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end end end diff --git a/sig/openai/models/vector_stores/file_update_params.rbs b/sig/openai/models/vector_stores/file_update_params.rbs index 7320f7b8..946352ec 100644 --- a/sig/openai/models/vector_stores/file_update_params.rbs +++ b/sig/openai/models/vector_stores/file_update_params.rbs @@ -32,7 +32,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end end end diff --git a/sig/openai/models/vector_stores/vector_store_file.rbs b/sig/openai/models/vector_stores/vector_store_file.rbs index 3bc27550..c25e3ea0 100644 --- 
a/sig/openai/models/vector_stores/vector_store_file.rbs +++ b/sig/openai/models/vector_stores/vector_store_file.rbs @@ -107,7 +107,7 @@ module OpenAI type attribute = String | Float | bool class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + def self.variants: -> [String, Float, bool] end end end diff --git a/sig/openai/page.rbs b/sig/openai/page.rbs index 94bcf79a..420e2716 100644 --- a/sig/openai/page.rbs +++ b/sig/openai/page.rbs @@ -2,15 +2,8 @@ module OpenAI class Page[Elem] include OpenAI::BasePage[Elem] - attr_accessor data: ::Array[Elem] + attr_accessor data: ::Array[Elem]? attr_accessor object: String - - def initialize: ( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::request_components, - headers: ::Hash[String, String], - page_data: ::Array[top] - ) -> void end end diff --git a/sig/openai/pooled_net_requester.rbs b/sig/openai/pooled_net_requester.rbs index 9e7daafb..c9f6520d 100644 --- a/sig/openai/pooled_net_requester.rbs +++ b/sig/openai/pooled_net_requester.rbs @@ -19,11 +19,16 @@ module OpenAI (String arg0) -> void } -> top - private def with_pool: (URI::Generic url) { (top arg0) -> void } -> void + private def with_pool: ( + URI::Generic url, + deadline: Float + ) { + (top arg0) -> void + } -> void def execute: ( OpenAI::PooledNetRequester::request request - ) -> [top, Enumerable[String]] + ) -> [Integer, top, Enumerable[String]] def initialize: (size: Integer) -> void end diff --git a/sig/openai/stream.rbs b/sig/openai/stream.rbs index 675ecb74..7474463b 100644 --- a/sig/openai/stream.rbs +++ b/sig/openai/stream.rbs @@ -1,15 +1,7 @@ module OpenAI class Stream[Elem] - include OpenAI::BaseStream[OpenAI::Util::sse_message, Elem] + include OpenAI::BaseStream[OpenAI::Util::server_sent_event, Elem] private def iterator: -> Enumerable[Elem] - - def initialize: ( - model: Class | OpenAI::Converter, - url: URI::Generic, - status: Integer, - response: top, - messages: Enumerable[OpenAI::Util::sse_message] - ) -> void end end diff --git a/sig/openai/util.rbs b/sig/openai/util.rbs index 2ee3b4df..375f8324 100644 --- a/sig/openai/util.rbs +++ b/sig/openai/util.rbs @@ -6,7 +6,7 @@ module OpenAI def self?.os: -> String - def self?.primitive?: (top input) -> (bool | top) + def self?.primitive?: (top input) -> bool def self?.coerce_boolean: (top input) -> (bool | top) @@ -118,15 +118,15 @@ module OpenAI Enumerable[top]? enum ) { (Enumerator::Yielder arg0) -> void - } -> void + } -> Enumerable[top] - type sse_message = + type server_sent_event = { event: String?, data: String?, id: String?, retry: Integer? 
} def self?.decode_lines: (Enumerable[String] enum) -> Enumerable[String] def self?.decode_sse: ( Enumerable[String] lines - ) -> OpenAI::Util::sse_message + ) -> OpenAI::Util::server_sent_event end end diff --git a/sig/openai/version.rbs b/sig/openai/version.rbs index adde5d9f..e4f5239e 100644 --- a/sig/openai/version.rbs +++ b/sig/openai/version.rbs @@ -1,3 +1,3 @@ module OpenAI - VERSION: "0.0.1-alpha.0" + VERSION: "0.1.0-alpha.1" end diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb index ced591bb..9a3f400f 100644 --- a/test/openai/client_test.rb +++ b/test/openai/client_test.rb @@ -18,35 +18,6 @@ def test_raises_on_missing_non_nullable_opts assert_match(/is required/, e.message) end - class MockResponse - # @return [Integer] - attr_reader :code - - # @param code [Integer] - # @param headers [Hash{String=>String}] - # - def initialize(code, headers) - @code = code - @headers = {"content-type" => "application/json", **headers} - end - - # @param header [String] - # - # @return [String, nil] - # - def [](header) - @headers[header] - end - - # @param header [String] - # - # @return [Boolean] - # - def key?(header) - @headers.key?(header) - end - end - class MockRequester # @return [Integer] attr_reader :response_code @@ -63,7 +34,6 @@ class MockRequester # @param response_code [Integer] # @param response_headers [Hash{String=>String}] # @param response_data [Object] - # def initialize(response_code, response_headers, response_data) @response_code = response_code @response_headers = response_headers @@ -72,11 +42,11 @@ def initialize(response_code, response_headers, response_data) end # @param req [Hash{Symbol=>Object}] - # def execute(req) # Deep copy the request because it is mutated on each retry. attempts.push(Marshal.load(Marshal.dump(req))) - [MockResponse.new(response_code, response_headers), response_data.grapheme_clusters] + headers = {"content-type" => "application/json", **response_headers} + [response_code, headers, response_data.grapheme_clusters] end end @@ -88,7 +58,7 @@ def test_client_default_request_default_retry_attempts assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) end @@ -104,7 +74,7 @@ def test_client_given_request_default_retry_attempts assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) end @@ -120,7 +90,7 @@ def test_client_default_request_given_retry_attempts assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {max_retries: 3} ) @@ -137,7 +107,7 @@ def test_client_given_request_given_retry_attempts assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {max_retries: 4} ) @@ -154,7 +124,7 @@ def test_client_retry_after_seconds assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) end @@ -172,7 +142,7 @@ def test_client_retry_after_date Thread.current.thread_variable_set(:time_now, Time.now) openai.chat.completions.create( messages: [{content: "string", role: 
:developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) Thread.current.thread_variable_set(:time_now, nil) @@ -190,7 +160,7 @@ def test_client_retry_after_ms assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) end @@ -207,7 +177,7 @@ def test_retry_count_header assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) end @@ -224,7 +194,7 @@ def test_omit_retry_count_header assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {"x-stainless-retry-count" => nil}} ) @@ -242,7 +212,7 @@ def test_overwrite_retry_count_header assert_raises(OpenAI::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {"x-stainless-retry-count" => "42"}} ) @@ -260,7 +230,7 @@ def test_client_redirect_307 assert_raises(OpenAI::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {}} ) @@ -283,7 +253,7 @@ def test_client_redirect_303 assert_raises(OpenAI::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {}} ) @@ -303,7 +273,7 @@ def test_client_redirect_auth_keep_same_origin assert_raises(OpenAI::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {"Authorization" => "Bearer xyz"}} ) @@ -323,7 +293,7 @@ def test_client_redirect_auth_strip_cross_origin assert_raises(OpenAI::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true, request_options: {extra_headers: {"Authorization" => "Bearer xyz"}} ) @@ -338,7 +308,7 @@ def test_default_headers openai.requester = requester openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) headers = requester.attempts.first[:headers] diff --git a/test/openai/resources/audio/speech_test.rb b/test/openai/resources/audio/speech_test.rb index e2d6de20..9718aa1a 100644 --- a/test/openai/resources/audio/speech_test.rb +++ b/test/openai/resources/audio/speech_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::Audio::SpeechTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.audio.speech.create(input: "input", model: "string", voice: :alloy) + response = @openai.audio.speech.create(input: "input", model: :"tts-1", voice: :alloy) assert_pattern do response => OpenAI::Unknown diff --git a/test/openai/resources/beta/assistants_test.rb b/test/openai/resources/beta/assistants_test.rb index 175fb0d5..e0ffb63a 100644 --- a/test/openai/resources/beta/assistants_test.rb +++ b/test/openai/resources/beta/assistants_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::Beta::AssistantsTest 
< OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.beta.assistants.create(model: :"gpt-4o") + response = @openai.beta.assistants.create(model: :"o3-mini") assert_pattern do response => OpenAI::Models::Beta::Assistant diff --git a/test/openai/resources/chat/completions_test.rb b/test/openai/resources/chat/completions_test.rb index c45ef70f..4b353ac2 100644 --- a/test/openai/resources/chat/completions_test.rb +++ b/test/openai/resources/chat/completions_test.rb @@ -6,7 +6,7 @@ class OpenAI::Test::Resources::Chat::CompletionsTest < OpenAI::Test::ResourceTes def test_create_required_params response = @openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", + model: :"o3-mini", stream: true ) diff --git a/test/openai/resources/completions_test.rb b/test/openai/resources/completions_test.rb index 0ff1c63c..6fb99107 100644 --- a/test/openai/resources/completions_test.rb +++ b/test/openai/resources/completions_test.rb @@ -4,7 +4,11 @@ class OpenAI::Test::Resources::CompletionsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.completions.create(model: "string", prompt: "This is a test.", stream: true) + response = @openai.completions.create( + model: :"gpt-3.5-turbo-instruct", + prompt: "This is a test.", + stream: true + ) assert_pattern do response => OpenAI::Models::Completion diff --git a/test/openai/resources/embeddings_test.rb b/test/openai/resources/embeddings_test.rb index 59218d06..244d5817 100644 --- a/test/openai/resources/embeddings_test.rb +++ b/test/openai/resources/embeddings_test.rb @@ -6,7 +6,7 @@ class OpenAI::Test::Resources::EmbeddingsTest < OpenAI::Test::ResourceTest def test_create_required_params response = @openai.embeddings.create( input: "The quick brown fox jumped over the lazy dog", - model: :"text-embedding-3-small" + model: :"text-embedding-ada-002" ) assert_pattern do diff --git a/test/openai/resources/fine_tuning/jobs_test.rb b/test/openai/resources/fine_tuning/jobs_test.rb index c085d073..2b35089a 100644 --- a/test/openai/resources/fine_tuning/jobs_test.rb +++ b/test/openai/resources/fine_tuning/jobs_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::FineTuning::JobsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.fine_tuning.jobs.create(model: :"gpt-4o-mini", training_file: "file-abc123") + response = @openai.fine_tuning.jobs.create(model: :"babbage-002", training_file: "file-abc123") assert_pattern do response => OpenAI::Models::FineTuning::FineTuningJob diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb index bd1bcaf3..6ecd70a3 100644 --- a/test/openai/resources/responses_test.rb +++ b/test/openai/resources/responses_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::ResponsesTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.responses.create(input: "string", model: :"gpt-4o", stream: true) + response = @openai.responses.create(input: "string", model: :"o3-mini", stream: true) assert_pattern do response => OpenAI::Models::Responses::Response diff --git a/test/openai/util_test.rb b/test/openai/util_test.rb index d319e2f9..476e16af 100644 --- a/test/openai/util_test.rb +++ b/test/openai/util_test.rb @@ -161,7 +161,9 @@ class OpenAI::Test::UtilFormDataEncodingTest < Minitest::Test class FakeCGI < CGI def initialize(headers, io) @ctype = headers["content-type"] + # rubocop:disable Lint/EmptyBlock @io = 
OpenAI::Util::ReadIOAdapter.new(io) {} + # rubocop:enable Lint/EmptyBlock @c_len = io.to_a.join.bytesize.to_s super() end @@ -217,7 +219,9 @@ def test_copy_read } cases.each do |input, expected| io = StringIO.new + # rubocop:disable Lint/EmptyBlock adapter = OpenAI::Util::ReadIOAdapter.new(input) {} + # rubocop:enable Lint/EmptyBlock IO.copy_stream(adapter, io) assert_equal(expected, io.string) end