diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3b07edf5..a26ebfc1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.13.1" + ".": "0.14.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 4f41de4f..ca458135 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-79dcb0ae501ac17004f50aecb112a798290ab3727fbe7c7d1b34299e38ed4f8e.yml -openapi_spec_hash: c8d54bd1ae3d704f6b6f72ffd2f876d8 -config_hash: 167ad0ca036d0f023c78e6496b4311e8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml +openapi_spec_hash: d8b7d38911fead545adf3e4297956410 +config_hash: 5525bda35e48ea6387c6175c4d1651fa diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a106bef..a10c072d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.14.0 (2025-07-16) + +Full Changelog: [v0.13.1...v0.14.0](https://github.com/openai/openai-ruby/compare/v0.13.1...v0.14.0) + +### Features + +* **api:** manual updates ([b749baf](https://github.com/openai/openai-ruby/commit/b749baf0d1b52c35ff6e50b889301aa7b8ee2ba1)) + ## 0.13.1 (2025-07-15) Full Changelog: [v0.13.0...v0.13.1](https://github.com/openai/openai-ruby/compare/v0.13.0...v0.13.1) diff --git a/Gemfile.lock b/Gemfile.lock index 68834dea..0fe355fc 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.13.1) + openai (0.14.0) connection_pool GEM diff --git a/README.md b/README.md index d1c98b07..98dda664 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.13.1" +gem "openai", "~> 0.14.0" ``` diff --git a/examples/image_stream.rb b/examples/image_stream.rb new file mode 100755 index 00000000..ea373b62 --- /dev/null +++ b/examples/image_stream.rb @@ -0,0 +1,44 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true +# typed: strong + +require_relative "../lib/openai" +require "base64" + +client = OpenAI::Client.new + +puts "Starting image streaming example..." + +stream = client.images.generate_stream_raw( + model: "gpt-image-1", + prompt: "A cute baby sea otter", + n: 1, + size: "1024x1024", + partial_images: 3 +) + +stream.each do |event| + case event + when OpenAI::Models::ImageGenPartialImageEvent + puts(" Partial image #{event.partial_image_index + 1}/3 received") + puts(" Size: #{event.b64_json.length} characters (base64)") + + # Save partial image to file + filename = "partial_#{event.partial_image_index + 1}.png" + image_data = Base64.decode64(event.b64_json) + File.write(filename, image_data) + puts(" Saved to: #{File.expand_path(filename)}") + + when OpenAI::Models::ImageGenCompletedEvent + puts("\n✅ Final image completed!") + puts(" Size: #{event.b64_json.length} characters (base64)") + + # Save final image to file + filename = "final_image.png" + image_data = Base64.decode64(event.b64_json) + File.write(filename, image_data) + puts(" Saved to: #{File.expand_path(filename)}") + end +end + +puts "Image streaming completed!" 
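The example above exercises the new `generate_stream_raw`; this release adds a matching `edit_stream_raw` on the same resource (see `lib/openai/resources/images.rb` below). Here is a minimal sketch of the editing counterpart — the input path `otter.png` is a hypothetical placeholder, and the event handling mirrors the generation example:

```ruby
#!/usr/bin/env ruby
# frozen_string_literal: true

require_relative "../lib/openai"
require "base64"
require "pathname"

client = OpenAI::Client.new

# NOTE: "otter.png" is a placeholder; any supported image file works here.
stream = client.images.edit_stream_raw(
  model: "gpt-image-1",
  image: Pathname("otter.png"),
  prompt: "Give the otter a tiny red scarf",
  input_fidelity: :high,  # new in this release; gpt-image-1 only
  partial_images: 2       # 0..3 partial frames before the final image
)

stream.each do |event|
  case event
  when OpenAI::Models::ImageEditPartialImageEvent
    # Partial frames carry a 0-based index.
    File.write("edit_partial_#{event.partial_image_index + 1}.png",
               Base64.decode64(event.b64_json))
  when OpenAI::Models::ImageEditCompletedEvent
    File.write("edited_image.png", Base64.decode64(event.b64_json))
    puts "Edit completed (#{event.usage.total_tokens} tokens)"
  end
end
```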
diff --git a/lib/openai.rb b/lib/openai.rb index 4141ab99..7abe9d2f 100644 --- a/lib/openai.rb +++ b/lib/openai.rb @@ -320,8 +320,14 @@ require_relative "openai/models/graders/string_check_grader" require_relative "openai/models/image" require_relative "openai/models/image_create_variation_params" +require_relative "openai/models/image_edit_completed_event" require_relative "openai/models/image_edit_params" +require_relative "openai/models/image_edit_partial_image_event" +require_relative "openai/models/image_edit_stream_event" +require_relative "openai/models/image_gen_completed_event" require_relative "openai/models/image_generate_params" +require_relative "openai/models/image_gen_partial_image_event" +require_relative "openai/models/image_gen_stream_event" require_relative "openai/models/image_model" require_relative "openai/models/images_response" require_relative "openai/models/metadata" diff --git a/lib/openai/models.rb b/lib/openai/models.rb index 3261259a..26243742 100644 --- a/lib/openai/models.rb +++ b/lib/openai/models.rb @@ -152,10 +152,22 @@ module OpenAI ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams + ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent + ImageEditParams = OpenAI::Models::ImageEditParams + ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent + + ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent + + ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent + ImageGenerateParams = OpenAI::Models::ImageGenerateParams + ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent + + ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent + ImageModel = OpenAI::Models::ImageModel ImagesResponse = OpenAI::Models::ImagesResponse diff --git a/lib/openai/models/image_edit_completed_event.rb b/lib/openai/models/image_edit_completed_event.rb new file mode 100644 index 00000000..2038c5f9 --- /dev/null +++ b/lib/openai/models/image_edit_completed_event.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded final edited image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] + required :background, enum: -> { OpenAI::ImageEditCompletedEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageEditCompletedEvent::OutputFormat } + + # @!attribute quality + # The quality setting for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] + required :quality, enum: -> { OpenAI::ImageEditCompletedEvent::Quality } + + # @!attribute size + # The size of the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] + required :size, enum: -> { OpenAI::ImageEditCompletedEvent::Size } + + # @!attribute type + # The type of the event. Always `image_edit.completed`. 
+ # + # @return [Symbol, :"image_edit.completed"] + required :type, const: :"image_edit.completed" + + # @!attribute usage + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @return [OpenAI::Models::ImageEditCompletedEvent::Usage] + required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage } + + # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_edit.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditCompletedEvent} for more details. + # + # Emitted when image editing has completed and the final image is available. + # + # @param b64_json [String] Base64-encoded final edited image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] The background setting for the edited image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] The output format for the edited image. + # + # @param quality [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] The quality setting for the edited image. + # + # @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image. + # + # @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation. + # + # @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`. + + # The background setting for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::ImageEditCompletedEvent#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # The number of tokens (images and text) in the input prompt. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute input_tokens_details + # The input tokens detailed information for the image generation. + # + # @return [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails } + + # @!attribute output_tokens + # The number of image tokens in the output image. 
+ # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens (images and text) used for the image generation. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details. + # + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. + # + # @param input_tokens_details [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation. + # + # @param output_tokens [Integer] The number of image tokens in the output image. + # + # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation. + + # @see OpenAI::Models::ImageEditCompletedEvent::Usage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute image_tokens + # The number of image tokens in the input prompt. + # + # @return [Integer] + required :image_tokens, Integer + + # @!attribute text_tokens + # The number of text tokens in the input prompt. + # + # @return [Integer] + required :text_tokens, Integer + + # @!method initialize(image_tokens:, text_tokens:) + # The input tokens detailed information for the image generation. + # + # @param image_tokens [Integer] The number of image tokens in the input prompt. + # + # @param text_tokens [Integer] The number of text tokens in the input prompt. + end + end + end + end +end diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb index 833d7a5a..05014dd9 100644 --- a/lib/openai/models/image_edit_params.rb +++ b/lib/openai/models/image_edit_params.rb @@ -3,6 +3,8 @@ module OpenAI module Models # @see OpenAI::Resources::Images#edit + # + # @see OpenAI::Resources::Images#stream_raw class ImageEditParams < OpenAI::Internal::Type::BaseModel extend OpenAI::Internal::Type::RequestParameters::Converter include OpenAI::Internal::Type::RequestParameters @@ -38,6 +40,14 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::ImageEditParams::Background, nil] optional :background, enum: -> { OpenAI::ImageEditParams::Background }, nil?: true + # @!attribute input_fidelity + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] + optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true + # @!attribute mask # An additional image whose fully transparent areas (e.g. where alpha is zero) # indicate where `image` should be edited. If there are multiple images provided, @@ -77,6 +87,14 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true + # @!attribute partial_images + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. 
When set to + # 0, the response will be a single image sent in one streaming event. + # + # @return [Integer, nil] + optional :partial_images, Integer, nil?: true + # @!attribute quality # The quality of the image that will be generated. `high`, `medium` and `low` are # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. @@ -110,7 +128,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :user, String - # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # @!method initialize(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::ImageEditParams} for more details. # @@ -120,6 +138,8 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup @@ -130,6 +150,8 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are # # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` @@ -179,6 +201,19 @@ module Background # @return [Array] end + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH = :high + LOW = :low + + # @!method self.values + # @return [Array] + end + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` # is used. diff --git a/lib/openai/models/image_edit_partial_image_event.rb b/lib/openai/models/image_edit_partial_image_event.rb new file mode 100644 index 00000000..95d5bd96 --- /dev/null +++ b/lib/openai/models/image_edit_partial_image_event.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded partial image data, suitable for rendering as an image. 
+ # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] + required :background, enum: -> { OpenAI::ImageEditPartialImageEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageEditPartialImageEvent::OutputFormat } + + # @!attribute partial_image_index + # 0-based index for the partial image (streaming). + # + # @return [Integer] + required :partial_image_index, Integer + + # @!attribute quality + # The quality setting for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] + required :quality, enum: -> { OpenAI::ImageEditPartialImageEvent::Quality } + + # @!attribute size + # The size of the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] + required :size, enum: -> { OpenAI::ImageEditPartialImageEvent::Size } + + # @!attribute type + # The type of the event. Always `image_edit.partial_image`. + # + # @return [Symbol, :"image_edit.partial_image"] + required :type, const: :"image_edit.partial_image" + + # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_edit.partial_image") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditPartialImageEvent} for more details. + # + # Emitted when a partial image is available during image editing streaming. + # + # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] The background setting for the requested edited image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] The output format for the requested edited image. + # + # @param partial_image_index [Integer] 0-based index for the partial image (streaming). + # + # @param quality [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] The quality setting for the requested edited image. + # + # @param size [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] The size of the requested edited image. + # + # @param type [Symbol, :"image_edit.partial_image"] The type of the event. Always `image_edit.partial_image`. + + # The background setting for the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the requested edited image. 
+ # + # @see OpenAI::Models::ImageEditPartialImageEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/image_edit_stream_event.rb b/lib/openai/models/image_edit_stream_event.rb new file mode 100644 index 00000000..b72d2c27 --- /dev/null +++ b/lib/openai/models/image_edit_stream_event.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # Emitted when a partial image is available during image editing streaming. + module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Emitted when a partial image is available during image editing streaming. + variant :"image_edit.partial_image", -> { OpenAI::ImageEditPartialImageEvent } + + # Emitted when image editing has completed and the final image is available. + variant :"image_edit.completed", -> { OpenAI::ImageEditCompletedEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::ImageEditPartialImageEvent, OpenAI::Models::ImageEditCompletedEvent)] + end + end +end diff --git a/lib/openai/models/image_gen_completed_event.rb b/lib/openai/models/image_gen_completed_event.rb new file mode 100644 index 00000000..8a730653 --- /dev/null +++ b/lib/openai/models/image_gen_completed_event.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] + required :background, enum: -> { OpenAI::ImageGenCompletedEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageGenCompletedEvent::OutputFormat } + + # @!attribute quality + # The quality setting for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] + required :quality, enum: -> { OpenAI::ImageGenCompletedEvent::Quality } + + # @!attribute size + # The size of the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] + required :size, enum: -> { OpenAI::ImageGenCompletedEvent::Size } + + # @!attribute type + # The type of the event. Always `image_generation.completed`. + # + # @return [Symbol, :"image_generation.completed"] + required :type, const: :"image_generation.completed" + + # @!attribute usage + # For `gpt-image-1` only, the token usage information for the image generation. 
+ # + # @return [OpenAI::Models::ImageGenCompletedEvent::Usage] + required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage } + + # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_generation.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenCompletedEvent} for more details. + # + # Emitted when image generation has completed and the final image is available. + # + # @param b64_json [String] Base64-encoded image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] The background setting for the generated image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] The output format for the generated image. + # + # @param quality [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] The quality setting for the generated image. + # + # @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image. + # + # @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation. + # + # @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`. + + # The background setting for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::ImageGenCompletedEvent#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # The number of tokens (images and text) in the input prompt. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute input_tokens_details + # The input tokens detailed information for the image generation. + # + # @return [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails } + + # @!attribute output_tokens + # The number of image tokens in the output image. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens (images and text) used for the image generation. 
+ # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details. + # + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. + # + # @param input_tokens_details [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation. + # + # @param output_tokens [Integer] The number of image tokens in the output image. + # + # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation. + + # @see OpenAI::Models::ImageGenCompletedEvent::Usage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute image_tokens + # The number of image tokens in the input prompt. + # + # @return [Integer] + required :image_tokens, Integer + + # @!attribute text_tokens + # The number of text tokens in the input prompt. + # + # @return [Integer] + required :text_tokens, Integer + + # @!method initialize(image_tokens:, text_tokens:) + # The input tokens detailed information for the image generation. + # + # @param image_tokens [Integer] The number of image tokens in the input prompt. + # + # @param text_tokens [Integer] The number of text tokens in the input prompt. + end + end + end + end +end diff --git a/lib/openai/models/image_gen_partial_image_event.rb b/lib/openai/models/image_gen_partial_image_event.rb new file mode 100644 index 00000000..33601e7c --- /dev/null +++ b/lib/openai/models/image_gen_partial_image_event.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded partial image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] + required :background, enum: -> { OpenAI::ImageGenPartialImageEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageGenPartialImageEvent::OutputFormat } + + # @!attribute partial_image_index + # 0-based index for the partial image (streaming). + # + # @return [Integer] + required :partial_image_index, Integer + + # @!attribute quality + # The quality setting for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] + required :quality, enum: -> { OpenAI::ImageGenPartialImageEvent::Quality } + + # @!attribute size + # The size of the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] + required :size, enum: -> { OpenAI::ImageGenPartialImageEvent::Size } + + # @!attribute type + # The type of the event. Always `image_generation.partial_image`. 
+ # + # @return [Symbol, :"image_generation.partial_image"] + required :type, const: :"image_generation.partial_image" + + # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_generation.partial_image") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenPartialImageEvent} for more details. + # + # Emitted when a partial image is available during image generation streaming. + # + # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] The background setting for the requested image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] The output format for the requested image. + # + # @param partial_image_index [Integer] 0-based index for the partial image (streaming). + # + # @param quality [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] The quality setting for the requested image. + # + # @param size [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] The size of the requested image. + # + # @param type [Symbol, :"image_generation.partial_image"] The type of the event. Always `image_generation.partial_image`. + + # The background setting for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/image_gen_stream_event.rb b/lib/openai/models/image_gen_stream_event.rb new file mode 100644 index 00000000..91af7984 --- /dev/null +++ b/lib/openai/models/image_gen_stream_event.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # Emitted when a partial image is available during image generation streaming. + module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Emitted when a partial image is available during image generation streaming. + variant :"image_generation.partial_image", -> { OpenAI::ImageGenPartialImageEvent } + + # Emitted when image generation has completed and the final image is available. 
+ variant :"image_generation.completed", -> { OpenAI::ImageGenCompletedEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::ImageGenPartialImageEvent, OpenAI::Models::ImageGenCompletedEvent)] + end + end +end diff --git a/lib/openai/models/image_generate_params.rb b/lib/openai/models/image_generate_params.rb index b0b47d41..d63bfdf7 100644 --- a/lib/openai/models/image_generate_params.rb +++ b/lib/openai/models/image_generate_params.rb @@ -3,6 +3,8 @@ module OpenAI module Models # @see OpenAI::Resources::Images#generate + # + # @see OpenAI::Resources::Images#stream_raw class ImageGenerateParams < OpenAI::Internal::Type::BaseModel extend OpenAI::Internal::Type::RequestParameters::Converter include OpenAI::Internal::Type::RequestParameters @@ -64,6 +66,14 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] optional :output_format, enum: -> { OpenAI::ImageGenerateParams::OutputFormat }, nil?: true + # @!attribute partial_images + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # @return [Integer, nil] + optional :partial_images, Integer, nil?: true + # @!attribute quality # The quality of the image that will be generated. # @@ -111,7 +121,7 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :user, String - # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) + # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::ImageGenerateParams} for more details. # @@ -129,6 +139,8 @@ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel # # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated. # # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned diff --git a/lib/openai/models/images_response.rb b/lib/openai/models/images_response.rb index d6f6e63d..725d2727 100644 --- a/lib/openai/models/images_response.rb +++ b/lib/openai/models/images_response.rb @@ -152,6 +152,9 @@ class Usage < OpenAI::Internal::Type::BaseModel required :total_tokens, Integer # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImagesResponse::Usage} for more details. + # # For `gpt-image-1` only, the token usage information for the image generation. # # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. 
diff --git a/lib/openai/models/responses/response_output_refusal.rb b/lib/openai/models/responses/response_output_refusal.rb index 62316c99..bf6272e6 100644 --- a/lib/openai/models/responses/response_output_refusal.rb +++ b/lib/openai/models/responses/response_output_refusal.rb @@ -5,7 +5,7 @@ module Models module Responses class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel # @!attribute refusal - # The refusal explanationfrom the model. + # The refusal explanation from the model. # # @return [String] required :refusal, String @@ -19,7 +19,7 @@ class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel # @!method initialize(refusal:, type: :refusal) # A refusal from the model. # - # @param refusal [String] The refusal explanationfrom the model. + # @param refusal [String] The refusal explanation from the model. # # @param type [Symbol, :refusal] The type of the refusal. Always `refusal`. end diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index e9fe3fbd..46add9fc 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -305,6 +305,18 @@ class ImageGeneration < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background, nil] optional :background, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Background } + # @!attribute input_fidelity + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] + optional :input_fidelity, + enum: -> { + OpenAI::Responses::Tool::ImageGeneration::InputFidelity + }, + nil?: true + # @!attribute input_image_mask # Optional mask for inpainting. Contains `image_url` (string, optional) and # `file_id` (string, optional). @@ -358,7 +370,7 @@ class ImageGeneration < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size, nil] optional :size, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Size } - # @!method initialize(background: nil, input_image_mask: nil, model: nil, moderation: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, size: nil, type: :image_generation) + # @!method initialize(background: nil, input_fidelity: nil, input_image_mask: nil, model: nil, moderation: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, size: nil, type: :image_generation) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::Tool::ImageGeneration} for more details. # @@ -366,6 +378,8 @@ class ImageGeneration < OpenAI::Internal::Type::BaseModel # # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`, # + # @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # # @param input_image_mask [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask] Optional mask for inpainting. Contains `image_url` # # @param model [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model] The image generation model to use. Default: `gpt-image-1`. 
@@ -399,6 +413,21 @@ module Background # @return [Array] end + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#input_fidelity + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH = :high + LOW = :low + + # @!method self.values + # @return [Array] + end + # @see OpenAI::Models::Responses::Tool::ImageGeneration#input_image_mask class InputImageMask < OpenAI::Internal::Type::BaseModel # @!attribute file_id diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb index e1d26736..07fae67b 100644 --- a/lib/openai/resources/images.rb +++ b/lib/openai/resources/images.rb @@ -39,13 +39,15 @@ def create_variation(params) ) end + # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart. + # # Some parameter documentations has been truncated, see # {OpenAI::Models::ImageEditParams} for more details. # # Creates an edited or extended image given one or more source images and a # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. # - # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # @overload edit(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) # # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images. # @@ -53,6 +55,8 @@ def create_variation(params) # # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup @@ -63,6 +67,8 @@ def create_variation(params) # # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are # # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` @@ -78,6 +84,10 @@ def create_variation(params) # @see OpenAI::Models::ImageEditParams def edit(params) parsed, options = OpenAI::ImageEditParams.dump_request(params) + if parsed[:stream] + message = "Please use `#stream_raw` for the streaming use case." 
+ raise ArgumentError.new(message) + end @client.request( method: :post, path: "images/edits", @@ -88,13 +98,76 @@ def edit(params) ) end + # See {OpenAI::Resources::Images#edit} for non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditParams} for more details. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + # + # @overload stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images. + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 1000 character + # + # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). + # + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # + # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. + # + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter + # + # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is + # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # + # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are + # + # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` + # + # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::ImageEditParams + def edit_stream_raw(params) + parsed, options = OpenAI::ImageEditParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#edit` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :post, + path: "images/edits", + headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"}, + body: parsed, + stream: OpenAI::Internal::Stream, + model: OpenAI::ImageEditStreamEvent, + options: options + ) + end + + # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart. 
+ # # Some parameter documentations has been truncated, see # {OpenAI::Models::ImageGenerateParams} for more details. # # Creates an image given a prompt. # [Learn more](https://platform.openai.com/docs/guides/images). # - # @overload generate(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) + # @overload generate(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) # # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte # @@ -110,6 +183,8 @@ def edit(params) # # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated. # # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned @@ -127,6 +202,10 @@ def edit(params) # @see OpenAI::Models::ImageGenerateParams def generate(params) parsed, options = OpenAI::ImageGenerateParams.dump_request(params) + if parsed[:stream] + message = "Please use `#stream_raw` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :post, path: "images/generations", @@ -136,6 +215,65 @@ def generate(params) ) end + # See {OpenAI::Resources::Images#generate} for non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenerateParams} for more details. + # + # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + # + # @overload stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte + # + # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s). + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-im + # + # @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by `gpt-image-1`. Must + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter is only + # + # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su + # + # @param partial_images [Integer, nil] The number of partial images to generate. 
This parameter is used for + # + # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated. + # + # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned + # + # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e- + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::ImageGenerateParams + def generate_stream_raw(params) + parsed, options = OpenAI::ImageGenerateParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#generate` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :post, + path: "images/generations", + headers: {"accept" => "text/event-stream"}, + body: parsed, + stream: OpenAI::Internal::Stream, + model: OpenAI::ImageGenStreamEvent, + options: options + ) + end + # @api private # # @param client [OpenAI::Client] diff --git a/lib/openai/version.rb b/lib/openai/version.rb index ddfc5bda..ef837985 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.13.1" + VERSION = "0.14.0" end diff --git a/rbi/openai/models.rbi b/rbi/openai/models.rbi index 78cb3785..c5a9d836 100644 --- a/rbi/openai/models.rbi +++ b/rbi/openai/models.rbi @@ -115,10 +115,22 @@ module OpenAI ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams + ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent + ImageEditParams = OpenAI::Models::ImageEditParams + ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent + + ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent + + ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent + ImageGenerateParams = OpenAI::Models::ImageGenerateParams + ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent + + ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent + ImageModel = OpenAI::Models::ImageModel ImagesResponse = OpenAI::Models::ImagesResponse diff --git a/rbi/openai/models/image_edit_completed_event.rbi b/rbi/openai/models/image_edit_completed_event.rbi new file mode 100644 index 00000000..07cfa0ae --- /dev/null +++ b/rbi/openai/models/image_edit_completed_event.rbi @@ -0,0 +1,346 @@ +# typed: strong + +module OpenAI + module Models + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageEditCompletedEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded final edited image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the edited image. + sig { returns(OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol) } + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the edited image. 
+ sig do + returns(OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # The quality setting for the edited image. + sig { returns(OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the edited image. + sig { returns(OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_edit.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # For `gpt-image-1` only, the token usage information for the image generation. + sig { returns(OpenAI::ImageEditCompletedEvent::Usage) } + attr_reader :usage + + sig { params(usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash).void } + attr_writer :usage + + # Emitted when image editing has completed and the final image is available. + sig do + params( + b64_json: String, + background: OpenAI::ImageEditCompletedEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditCompletedEvent::OutputFormat::OrSymbol, + quality: OpenAI::ImageEditCompletedEvent::Quality::OrSymbol, + size: OpenAI::ImageEditCompletedEvent::Size::OrSymbol, + usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded final edited image data, suitable for rendering as an image. + b64_json:, + # The background setting for the edited image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the edited image. + output_format:, + # The quality setting for the edited image. + quality:, + # The size of the edited image. + size:, + # For `gpt-image-1` only, the token usage information for the image generation. + usage:, + # The type of the event. Always `image_edit.completed`. + type: :"image_edit.completed" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol, + quality: OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol, + size: OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol, + type: Symbol, + usage: OpenAI::ImageEditCompletedEvent::Usage + } + ) + end + def to_hash + end + + # The background setting for the edited image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The output format for the edited image. 
+ module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the edited image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + MEDIUM = + T.let(:medium, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + HIGH = + T.let(:high, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the edited image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditCompletedEvent::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + AUTO = T.let(:auto, OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageEditCompletedEvent::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens (images and text) in the input prompt. + sig { returns(Integer) } + attr_accessor :input_tokens + + # The input tokens detailed information for the image generation. + sig do + returns(OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails) + end + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of image tokens in the output image. + sig { returns(Integer) } + attr_accessor :output_tokens + + # The total number of tokens (images and text) used for the image generation. + sig { returns(Integer) } + attr_accessor :total_tokens + + # For `gpt-image-1` only, the token usage information for the image generation. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash, + output_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens (images and text) in the input prompt. + input_tokens:, + # The input tokens detailed information for the image generation. 
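+        # (These details partition `input_tokens` by modality, so the image
+        # and text counts sum to the total; e.g. 50 image tokens plus 14 text
+        # tokens imply `input_tokens == 64`. Figures are illustrative only.)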
+ input_tokens_details:, + # The number of image tokens in the output image. + output_tokens:, + # The total number of tokens (images and text) used for the image generation. + total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of image tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :image_tokens + + # The number of text tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :text_tokens + + # The input tokens detailed information for the image generation. + sig do + params(image_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of image tokens in the input prompt. + image_tokens:, + # The number of text tokens in the input prompt. + text_tokens: + ) + end + + sig do + override.returns({ image_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_params.rbi b/rbi/openai/models/image_edit_params.rbi index 0e0957b1..6354595b 100644 --- a/rbi/openai/models/image_edit_params.rbi +++ b/rbi/openai/models/image_edit_params.rbi @@ -36,6 +36,14 @@ module OpenAI sig { returns(T.nilable(OpenAI::ImageEditParams::Background::OrSymbol)) } attr_accessor :background + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + sig do + returns(T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol)) + end + attr_accessor :input_fidelity + # An additional image whose fully transparent areas (e.g. where alpha is zero) # indicate where `image` should be edited. If there are multiple images provided, # the mask will be applied on the first image. Must be a valid PNG file, less than @@ -70,6 +78,12 @@ module OpenAI end attr_accessor :output_format + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + sig { returns(T.nilable(Integer)) } + attr_accessor :partial_images + # The quality of the image that will be generated. `high`, `medium` and `low` are # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. # Defaults to `auto`. 
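The `partial_images` accessor added above only matters on the streaming path. A rough sketch of the expected event counts, assuming a configured `client = OpenAI::Client.new` and the `edit_stream_raw` method introduced later in this diff (the image path is illustrative):

```ruby
require "pathname"

stream = client.images.edit_stream_raw(
  image: Pathname("input.png"),
  prompt: "Make the sky stormy",
  model: "gpt-image-1",
  partial_images: 2 # 0..3; with 0, the final image still arrives as one event
)

# Collect the stream and count each variant of the event union.
events = stream.to_a
partials = events.grep(OpenAI::Models::ImageEditPartialImageEvent)
finals = events.grep(OpenAI::Models::ImageEditCompletedEvent)
puts "#{partials.size} partial frame(s), #{finals.size} final image" # expect <= 2, then 1
```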
@@ -105,12 +119,15 @@ module OpenAI image: OpenAI::ImageEditParams::Image::Variants, prompt: String, background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), mask: OpenAI::Internal::FileInput, model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), n: T.nilable(Integer), output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), @@ -139,6 +156,10 @@ module OpenAI # If `transparent`, the output format needs to support transparency, so it should # be set to either `png` (default value) or `webp`. background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, # An additional image whose fully transparent areas (e.g. where alpha is zero) # indicate where `image` should be edited. If there are multiple images provided, # the mask will be applied on the first image. Must be a valid PNG file, less than @@ -158,6 +179,10 @@ module OpenAI # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The # default value is `png`. output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, # The quality of the image that will be generated. `high`, `medium` and `low` are # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. # Defaults to `auto`. @@ -186,12 +211,15 @@ module OpenAI prompt: String, background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), mask: OpenAI::Internal::FileInput, model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), n: T.nilable(Integer), output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), @@ -258,6 +286,29 @@ module OpenAI end end + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::InputFidelity) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + HIGH = + T.let(:high, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol) + LOW = T.let(:low, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::InputFidelity::TaggedSymbol] + ) + end + def self.values + end + end + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` # is used. 
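On the non-streaming path, `input_fidelity` is just another keyword on `Images#edit`. A minimal sketch, assuming a configured `client` and a local `portrait.png` (both illustrative):

```ruby
require "base64"
require "pathname"

response = client.images.edit(
  image: Pathname("portrait.png"),
  prompt: "Restyle this photo as a watercolor painting",
  model: "gpt-image-1",
  input_fidelity: :high # new enum: :high or :low (default :low)
)

# gpt-image-1 always returns base64-encoded image data.
File.binwrite("watercolor.png", Base64.decode64(response.data.first.b64_json))
```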
diff --git a/rbi/openai/models/image_edit_partial_image_event.rbi b/rbi/openai/models/image_edit_partial_image_event.rbi new file mode 100644 index 00000000..f4ae7d8b --- /dev/null +++ b/rbi/openai/models/image_edit_partial_image_event.rbi @@ -0,0 +1,249 @@ +# typed: strong + +module OpenAI + module Models + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageEditPartialImageEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded partial image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the requested edited image. + sig do + returns(OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol) + end + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the requested edited image. + sig do + returns(OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # 0-based index for the partial image (streaming). + sig { returns(Integer) } + attr_accessor :partial_image_index + + # The quality setting for the requested edited image. + sig { returns(OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the requested edited image. + sig { returns(OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_edit.partial_image`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial image is available during image editing streaming. + sig do + params( + b64_json: String, + background: OpenAI::ImageEditPartialImageEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditPartialImageEvent::OutputFormat::OrSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageEditPartialImageEvent::Quality::OrSymbol, + size: OpenAI::ImageEditPartialImageEvent::Size::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded partial image data, suitable for rendering as an image. + b64_json:, + # The background setting for the requested edited image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the requested edited image. + output_format:, + # 0-based index for the partial image (streaming). + partial_image_index:, + # The quality setting for the requested edited image. + quality:, + # The size of the requested edited image. + size:, + # The type of the event. Always `image_edit.partial_image`. + type: :"image_edit.partial_image" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol, + size: OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The background setting for the requested edited image. 
+ module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The output format for the requested edited image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the requested edited image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol) + MEDIUM = + T.let( + :medium, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the requested edited image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_stream_event.rbi b/rbi/openai/models/image_edit_stream_event.rbi new file mode 100644 index 00000000..5bfaed0a --- /dev/null +++ b/rbi/openai/models/image_edit_stream_event.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + # Emitted when a partial image is available during image editing streaming. 
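+    # (This union covers both streaming variants: the partial-image event and
+    # the terminal completed event listed under `Variants` below.)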
+ module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ImageEditPartialImageEvent, + OpenAI::ImageEditCompletedEvent + ) + end + + sig { override.returns(T::Array[OpenAI::ImageEditStreamEvent::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/image_gen_completed_event.rbi b/rbi/openai/models/image_gen_completed_event.rbi new file mode 100644 index 00000000..922b39b7 --- /dev/null +++ b/rbi/openai/models/image_gen_completed_event.rbi @@ -0,0 +1,339 @@ +# typed: strong + +module OpenAI + module Models + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageGenCompletedEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol) } + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the generated image. + sig do + returns(OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # The quality setting for the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_generation.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # For `gpt-image-1` only, the token usage information for the image generation. + sig { returns(OpenAI::ImageGenCompletedEvent::Usage) } + attr_reader :usage + + sig { params(usage: OpenAI::ImageGenCompletedEvent::Usage::OrHash).void } + attr_writer :usage + + # Emitted when image generation has completed and the final image is available. + sig do + params( + b64_json: String, + background: OpenAI::ImageGenCompletedEvent::Background::OrSymbol, + created_at: Integer, + output_format: OpenAI::ImageGenCompletedEvent::OutputFormat::OrSymbol, + quality: OpenAI::ImageGenCompletedEvent::Quality::OrSymbol, + size: OpenAI::ImageGenCompletedEvent::Size::OrSymbol, + usage: OpenAI::ImageGenCompletedEvent::Usage::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded image data, suitable for rendering as an image. + b64_json:, + # The background setting for the generated image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the generated image. + output_format:, + # The quality setting for the generated image. + quality:, + # The size of the generated image. + size:, + # For `gpt-image-1` only, the token usage information for the image generation. + usage:, + # The type of the event. Always `image_generation.completed`. 
+ type: :"image_generation.completed" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol, + quality: OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol, + size: OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol, + type: Symbol, + usage: OpenAI::ImageGenCompletedEvent::Usage + } + ) + end + def to_hash + end + + # The background setting for the generated image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The output format for the generated image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The quality setting for the generated image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = T.let(:low, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + MEDIUM = + T.let(:medium, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + HIGH = + T.let(:high, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the generated image. 
+ module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageGenCompletedEvent::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + AUTO = T.let(:auto, OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageGenCompletedEvent::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens (images and text) in the input prompt. + sig { returns(Integer) } + attr_accessor :input_tokens + + # The input tokens detailed information for the image generation. + sig do + returns(OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails) + end + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of image tokens in the output image. + sig { returns(Integer) } + attr_accessor :output_tokens + + # The total number of tokens (images and text) used for the image generation. + sig { returns(Integer) } + attr_accessor :total_tokens + + # For `gpt-image-1` only, the token usage information for the image generation. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails::OrHash, + output_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens (images and text) in the input prompt. + input_tokens:, + # The input tokens detailed information for the image generation. + input_tokens_details:, + # The number of image tokens in the output image. + output_tokens:, + # The total number of tokens (images and text) used for the image generation. + total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of image tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :image_tokens + + # The number of text tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :text_tokens + + # The input tokens detailed information for the image generation. + sig do + params(image_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of image tokens in the input prompt. + image_tokens:, + # The number of text tokens in the input prompt. 
+ text_tokens: + ) + end + + sig do + override.returns({ image_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/image_gen_partial_image_event.rbi b/rbi/openai/models/image_gen_partial_image_event.rbi new file mode 100644 index 00000000..c582e9c9 --- /dev/null +++ b/rbi/openai/models/image_gen_partial_image_event.rbi @@ -0,0 +1,243 @@ +# typed: strong + +module OpenAI + module Models + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageGenPartialImageEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded partial image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the requested image. + sig do + returns(OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol) + end + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the requested image. + sig do + returns(OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # 0-based index for the partial image (streaming). + sig { returns(Integer) } + attr_accessor :partial_image_index + + # The quality setting for the requested image. + sig { returns(OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the requested image. + sig { returns(OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_generation.partial_image`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial image is available during image generation streaming. + sig do + params( + b64_json: String, + background: OpenAI::ImageGenPartialImageEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenPartialImageEvent::OutputFormat::OrSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageGenPartialImageEvent::Quality::OrSymbol, + size: OpenAI::ImageGenPartialImageEvent::Size::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded partial image data, suitable for rendering as an image. + b64_json:, + # The background setting for the requested image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the requested image. + output_format:, + # 0-based index for the partial image (streaming). + partial_image_index:, + # The quality setting for the requested image. + quality:, + # The size of the requested image. + size:, + # The type of the event. Always `image_generation.partial_image`. + type: :"image_generation.partial_image" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol, + size: OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The background setting for the requested image. 
+ module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The output format for the requested image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the requested image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + MEDIUM = + T.let( + :medium, + OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol + ) + HIGH = + T.let(:high, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the requested image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_gen_stream_event.rbi b/rbi/openai/models/image_gen_stream_event.rbi new file mode 100644 index 00000000..2309b993 --- /dev/null +++ b/rbi/openai/models/image_gen_stream_event.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + # Emitted when a partial image is available during image generation streaming. 
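+    # (As with the edit union above, this covers both streaming variants: the
+    # partial-image event and the terminal completed event in `Variants`.)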
+ module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ImageGenPartialImageEvent, + OpenAI::ImageGenCompletedEvent + ) + end + + sig { override.returns(T::Array[OpenAI::ImageGenStreamEvent::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/image_generate_params.rbi b/rbi/openai/models/image_generate_params.rbi index c81dbbcf..29b5a6e2 100644 --- a/rbi/openai/models/image_generate_params.rbi +++ b/rbi/openai/models/image_generate_params.rbi @@ -60,6 +60,12 @@ module OpenAI end attr_accessor :output_format + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + sig { returns(T.nilable(Integer)) } + attr_accessor :partial_images + # The quality of the image that will be generated. # # - `auto` (default value) will automatically select the best quality for the @@ -116,6 +122,7 @@ module OpenAI output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), @@ -155,6 +162,10 @@ module OpenAI # The format in which the generated images are returned. This parameter is only # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, # The quality of the image that will be generated. # # - `auto` (default value) will automatically select the best quality for the @@ -199,6 +210,7 @@ module OpenAI output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), diff --git a/rbi/openai/models/responses/response_output_refusal.rbi b/rbi/openai/models/responses/response_output_refusal.rbi index ef08df31..b206e330 100644 --- a/rbi/openai/models/responses/response_output_refusal.rbi +++ b/rbi/openai/models/responses/response_output_refusal.rbi @@ -12,7 +12,7 @@ module OpenAI ) end - # The refusal explanationfrom the model. + # The refusal explanation from the model. sig { returns(String) } attr_accessor :refusal @@ -23,7 +23,7 @@ module OpenAI # A refusal from the model. sig { params(refusal: String, type: Symbol).returns(T.attached_class) } def self.new( - # The refusal explanationfrom the model. + # The refusal explanation from the model. refusal:, # The type of the refusal. Always `refusal`. type: :refusal diff --git a/rbi/openai/models/responses/tool.rbi b/rbi/openai/models/responses/tool.rbi index e3467167..fa1bac73 100644 --- a/rbi/openai/models/responses/tool.rbi +++ b/rbi/openai/models/responses/tool.rbi @@ -565,6 +565,18 @@ module OpenAI end attr_writer :background + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. 
Supports `high` and `low`. Defaults to `low`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ) + ) + end + attr_accessor :input_fidelity + # Optional mask for inpainting. Contains `image_url` (string, optional) and # `file_id` (string, optional). sig do @@ -695,6 +707,10 @@ module OpenAI params( background: OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol, + input_fidelity: + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ), input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash, model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol, @@ -714,6 +730,10 @@ module OpenAI # Background type for the generated image. One of `transparent`, `opaque`, or # `auto`. Default: `auto`. background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, # Optional mask for inpainting. Contains `image_url` (string, optional) and # `file_id` (string, optional). input_image_mask: nil, @@ -746,6 +766,10 @@ module OpenAI type: Symbol, background: OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol, + input_fidelity: + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ), input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask, model: @@ -806,6 +830,43 @@ module OpenAI end end + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + HIGH = + T.let( + :high, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ] + ) + end + def self.values + end + end + class InputImageMask < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do diff --git a/rbi/openai/resources/images.rbi b/rbi/openai/resources/images.rbi index f4f67d10..fc7a3207 100644 --- a/rbi/openai/resources/images.rbi +++ b/rbi/openai/resources/images.rbi @@ -42,6 +42,8 @@ module OpenAI ) end + # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart. + # # Creates an edited or extended image given one or more source images and a # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. 
sig do @@ -49,17 +51,21 @@ module OpenAI image: OpenAI::ImageEditParams::Image::Variants, prompt: String, background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), mask: OpenAI::Internal::FileInput, model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), n: T.nilable(Integer), output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), user: String, + stream: T.noreturn, request_options: OpenAI::RequestOptions::OrHash ).returns(OpenAI::ImagesResponse) end @@ -83,6 +89,10 @@ module OpenAI # If `transparent`, the output format needs to support transparency, so it should # be set to either `png` (default value) or `webp`. background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, # An additional image whose fully transparent areas (e.g. where alpha is zero) # indicate where `image` should be edited. If there are multiple images provided, # the mask will be applied on the first image. Must be a valid PNG file, less than @@ -102,6 +112,10 @@ module OpenAI # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The # default value is `png`. output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, # The quality of the image that will be generated. `high`, `medium` and `low` are # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. # Defaults to `auto`. @@ -119,10 +133,115 @@ module OpenAI # and detect abuse. # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for + # streaming and non-streaming use cases, respectively. + stream: false, request_options: {} ) end + # See {OpenAI::Resources::Images#edit} for non-streaming counterpart. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. 
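+      #
+      # A comment-only usage sketch; the image path, output file, and
+      # `preview` helper are illustrative, while the method, parameters, and
+      # event classes come from this diff:
+      #
+      #   stream = client.images.edit_stream_raw(
+      #     image: Pathname("otter.png"),
+      #     prompt: "Add a red scarf",
+      #     model: "gpt-image-1",
+      #     input_fidelity: :low,
+      #     partial_images: 2
+      #   )
+      #   stream.each do |event|
+      #     case event
+      #     when OpenAI::Models::ImageEditPartialImageEvent
+      #       preview(event.partial_image_index, event.b64_json)
+      #     when OpenAI::Models::ImageEditCompletedEvent
+      #       File.binwrite("edited.png", Base64.decode64(event.b64_json))
+      #     end
+      #   end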
+ sig do + params( + image: OpenAI::ImageEditParams::Image::Variants, + prompt: String, + background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), + mask: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[OpenAI::ImageEditStreamEvent::Variants] + ) + end + def edit_stream_raw( + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + mask: nil, + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. 
`dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for + # streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart. + # # Creates an image given a prompt. # [Learn more](https://platform.openai.com/docs/guides/images). sig do @@ -137,12 +256,14 @@ module OpenAI output_compression: T.nilable(Integer), output_format: T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), response_format: T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), user: String, + stream: T.noreturn, request_options: OpenAI::RequestOptions::OrHash ).returns(OpenAI::ImagesResponse) end @@ -176,6 +297,107 @@ module OpenAI # The format in which the generated images are returned. This parameter is only # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + quality: nil, + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + size: nil, + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. 
Natural causes the model to + # produce more natural, less hyper-real looking images. + style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate` + # for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#generate} for non-streaming counterpart. + # + # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + sig do + params( + prompt: String, + background: + T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol), + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + moderation: + T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), + style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[OpenAI::ImageGenStreamEvent::Variants] + ) + end + def generate_stream_raw( + # A text description of the desired image(s). The maximum length is 32000 + # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + # for `dall-e-3`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + model: nil, + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value). + moderation: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + partial_images: nil, # The quality of the image that will be generated. 
# # - `auto` (default value) will automatically select the best quality for the @@ -203,6 +425,9 @@ module OpenAI # and detect abuse. # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate` + # for streaming and non-streaming use cases, respectively. + stream: true, request_options: {} ) end diff --git a/sig/openai/models.rbs b/sig/openai/models.rbs index 997e6f93..1c5e1e9d 100644 --- a/sig/openai/models.rbs +++ b/sig/openai/models.rbs @@ -111,10 +111,22 @@ module OpenAI class ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams + class ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent + class ImageEditParams = OpenAI::Models::ImageEditParams + class ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent + + module ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent + + class ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent + class ImageGenerateParams = OpenAI::Models::ImageGenerateParams + class ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent + + module ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent + module ImageModel = OpenAI::Models::ImageModel class ImagesResponse = OpenAI::Models::ImagesResponse diff --git a/sig/openai/models/image_edit_completed_event.rbs b/sig/openai/models/image_edit_completed_event.rbs new file mode 100644 index 00000000..e21a10d8 --- /dev/null +++ b/sig/openai/models/image_edit_completed_event.rbs @@ -0,0 +1,150 @@ +module OpenAI + module Models + type image_edit_completed_event = + { + :b64_json => String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + type: :"image_edit.completed", + usage: OpenAI::ImageEditCompletedEvent::Usage + } + + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageEditCompletedEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageEditCompletedEvent::output_format + + attr_accessor quality: OpenAI::Models::ImageEditCompletedEvent::quality + + attr_accessor size: OpenAI::Models::ImageEditCompletedEvent::size + + attr_accessor type: :"image_edit.completed" + + attr_accessor usage: OpenAI::ImageEditCompletedEvent::Usage + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + usage: OpenAI::ImageEditCompletedEvent::Usage, + ?type: :"image_edit.completed" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + type: :"image_edit.completed", + usage: OpenAI::ImageEditCompletedEvent::Usage + } + + type background = :transparent | :opaque | :auto + + module Background + extend 
OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::size] + end + + type usage = + { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor input_tokens: Integer + + attr_accessor input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails + + attr_accessor output_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + type input_tokens_details = + { image_tokens: Integer, text_tokens: Integer } + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + attr_accessor image_tokens: Integer + + attr_accessor text_tokens: Integer + + def initialize: (image_tokens: Integer, text_tokens: Integer) -> void + + def to_hash: -> { image_tokens: Integer, text_tokens: Integer } + end + end + end + end +end diff --git a/sig/openai/models/image_edit_params.rbs b/sig/openai/models/image_edit_params.rbs index ca2820e9..66f4812f 100644 --- a/sig/openai/models/image_edit_params.rbs +++ b/sig/openai/models/image_edit_params.rbs @@ -5,11 +5,13 @@ module OpenAI image: OpenAI::Models::ImageEditParams::image, prompt: String, background: OpenAI::Models::ImageEditParams::background?, + input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, mask: OpenAI::Internal::file_input, model: OpenAI::Models::ImageEditParams::model?, n: Integer?, output_compression: Integer?, output_format: OpenAI::Models::ImageEditParams::output_format?, + partial_images: Integer?, quality: OpenAI::Models::ImageEditParams::quality?, response_format: OpenAI::Models::ImageEditParams::response_format?, size: OpenAI::Models::ImageEditParams::size?, @@ -27,6 +29,8 @@ module OpenAI attr_accessor background: OpenAI::Models::ImageEditParams::background? + attr_accessor input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity? + attr_reader mask: OpenAI::Internal::file_input? def mask=: (OpenAI::Internal::file_input) -> OpenAI::Internal::file_input @@ -39,6 +43,8 @@ module OpenAI attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format? + attr_accessor partial_images: Integer? 
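+      # (`partial_images` above is nilable; per the parameter docs, present
+      # values are constrained to 0..3.)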
+ attr_accessor quality: OpenAI::Models::ImageEditParams::quality? attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format? @@ -53,11 +59,13 @@ module OpenAI image: OpenAI::Models::ImageEditParams::image, prompt: String, ?background: OpenAI::Models::ImageEditParams::background?, + ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, ?mask: OpenAI::Internal::file_input, ?model: OpenAI::Models::ImageEditParams::model?, ?n: Integer?, ?output_compression: Integer?, ?output_format: OpenAI::Models::ImageEditParams::output_format?, + ?partial_images: Integer?, ?quality: OpenAI::Models::ImageEditParams::quality?, ?response_format: OpenAI::Models::ImageEditParams::response_format?, ?size: OpenAI::Models::ImageEditParams::size?, @@ -69,11 +77,13 @@ module OpenAI image: OpenAI::Models::ImageEditParams::image, prompt: String, background: OpenAI::Models::ImageEditParams::background?, + input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, mask: OpenAI::Internal::file_input, model: OpenAI::Models::ImageEditParams::model?, n: Integer?, output_compression: Integer?, output_format: OpenAI::Models::ImageEditParams::output_format?, + partial_images: Integer?, quality: OpenAI::Models::ImageEditParams::quality?, response_format: OpenAI::Models::ImageEditParams::response_format?, size: OpenAI::Models::ImageEditParams::size?, @@ -104,6 +114,17 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::background] end + type input_fidelity = :high | :low + + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH: :high + LOW: :low + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::input_fidelity] + end + type model = String | OpenAI::Models::image_model module Model diff --git a/sig/openai/models/image_edit_partial_image_event.rbs b/sig/openai/models/image_edit_partial_image_event.rbs new file mode 100644 index 00000000..1a96d108 --- /dev/null +++ b/sig/openai/models/image_edit_partial_image_event.rbs @@ -0,0 +1,105 @@ +module OpenAI + module Models + type image_edit_partial_image_event = + { + :b64_json => String, + background: OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + type: :"image_edit.partial_image" + } + + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageEditPartialImageEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format + + attr_accessor partial_image_index: Integer + + attr_accessor quality: OpenAI::Models::ImageEditPartialImageEvent::quality + + attr_accessor size: OpenAI::Models::ImageEditPartialImageEvent::size + + attr_accessor type: :"image_edit.partial_image" + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + ?type: :"image_edit.partial_image" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: 
OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + type: :"image_edit.partial_image" + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::size] + end + end + end +end diff --git a/sig/openai/models/image_edit_stream_event.rbs b/sig/openai/models/image_edit_stream_event.rbs new file mode 100644 index 00000000..0b0b65ce --- /dev/null +++ b/sig/openai/models/image_edit_stream_event.rbs @@ -0,0 +1,12 @@ +module OpenAI + module Models + type image_edit_stream_event = + OpenAI::ImageEditPartialImageEvent | OpenAI::ImageEditCompletedEvent + + module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::image_edit_stream_event] + end + end +end diff --git a/sig/openai/models/image_gen_completed_event.rbs b/sig/openai/models/image_gen_completed_event.rbs new file mode 100644 index 00000000..c47de644 --- /dev/null +++ b/sig/openai/models/image_gen_completed_event.rbs @@ -0,0 +1,150 @@ +module OpenAI + module Models + type image_gen_completed_event = + { + :b64_json => String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: OpenAI::Models::ImageGenCompletedEvent::size, + type: :"image_generation.completed", + usage: OpenAI::ImageGenCompletedEvent::Usage + } + + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageGenCompletedEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageGenCompletedEvent::output_format + + attr_accessor quality: OpenAI::Models::ImageGenCompletedEvent::quality + + attr_accessor size: OpenAI::Models::ImageGenCompletedEvent::size + + attr_accessor type: :"image_generation.completed" + + attr_accessor usage: OpenAI::ImageGenCompletedEvent::Usage + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: 
OpenAI::Models::ImageGenCompletedEvent::size, + usage: OpenAI::ImageGenCompletedEvent::Usage, + ?type: :"image_generation.completed" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: OpenAI::Models::ImageGenCompletedEvent::size, + type: :"image_generation.completed", + usage: OpenAI::ImageGenCompletedEvent::Usage + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::size] + end + + type usage = + { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor input_tokens: Integer + + attr_accessor input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails + + attr_accessor output_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + type input_tokens_details = + { image_tokens: Integer, text_tokens: Integer } + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + attr_accessor image_tokens: Integer + + attr_accessor text_tokens: Integer + + def initialize: (image_tokens: Integer, text_tokens: Integer) -> void + + def to_hash: -> { image_tokens: Integer, text_tokens: Integer } + end + end + end + end +end diff --git a/sig/openai/models/image_gen_partial_image_event.rbs b/sig/openai/models/image_gen_partial_image_event.rbs new file mode 100644 index 00000000..bffb443d --- /dev/null +++ b/sig/openai/models/image_gen_partial_image_event.rbs @@ -0,0 +1,105 @@ +module OpenAI + module Models + type image_gen_partial_image_event = + { + :b64_json => String, + background: OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + type: 
:"image_generation.partial_image" + } + + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageGenPartialImageEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format + + attr_accessor partial_image_index: Integer + + attr_accessor quality: OpenAI::Models::ImageGenPartialImageEvent::quality + + attr_accessor size: OpenAI::Models::ImageGenPartialImageEvent::size + + attr_accessor type: :"image_generation.partial_image" + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + ?type: :"image_generation.partial_image" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + type: :"image_generation.partial_image" + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::size] + end + end + end +end diff --git a/sig/openai/models/image_gen_stream_event.rbs b/sig/openai/models/image_gen_stream_event.rbs new file mode 100644 index 00000000..b1489c24 --- /dev/null +++ b/sig/openai/models/image_gen_stream_event.rbs @@ -0,0 +1,12 @@ +module OpenAI + module Models + type image_gen_stream_event = + OpenAI::ImageGenPartialImageEvent | OpenAI::ImageGenCompletedEvent + + module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::image_gen_stream_event] + end + end +end diff --git a/sig/openai/models/image_generate_params.rbs b/sig/openai/models/image_generate_params.rbs index e9b0e2e0..e870a248 100644 --- a/sig/openai/models/image_generate_params.rbs +++ b/sig/openai/models/image_generate_params.rbs @@ -9,6 +9,7 @@ module OpenAI n: Integer?, output_compression: Integer?, output_format: OpenAI::Models::ImageGenerateParams::output_format?, + partial_images: Integer?, quality: OpenAI::Models::ImageGenerateParams::quality?, response_format: 
OpenAI::Models::ImageGenerateParams::response_format?, size: OpenAI::Models::ImageGenerateParams::size?, @@ -35,6 +36,8 @@ module OpenAI attr_accessor output_format: OpenAI::Models::ImageGenerateParams::output_format? + attr_accessor partial_images: Integer? + attr_accessor quality: OpenAI::Models::ImageGenerateParams::quality? attr_accessor response_format: OpenAI::Models::ImageGenerateParams::response_format? @@ -55,6 +58,7 @@ module OpenAI ?n: Integer?, ?output_compression: Integer?, ?output_format: OpenAI::Models::ImageGenerateParams::output_format?, + ?partial_images: Integer?, ?quality: OpenAI::Models::ImageGenerateParams::quality?, ?response_format: OpenAI::Models::ImageGenerateParams::response_format?, ?size: OpenAI::Models::ImageGenerateParams::size?, @@ -71,6 +75,7 @@ module OpenAI n: Integer?, output_compression: Integer?, output_format: OpenAI::Models::ImageGenerateParams::output_format?, + partial_images: Integer?, quality: OpenAI::Models::ImageGenerateParams::quality?, response_format: OpenAI::Models::ImageGenerateParams::response_format?, size: OpenAI::Models::ImageGenerateParams::size?, diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs index 87e9506b..fffb117b 100644 --- a/sig/openai/models/responses/tool.rbs +++ b/sig/openai/models/responses/tool.rbs @@ -213,6 +213,7 @@ module OpenAI { type: :image_generation, background: OpenAI::Models::Responses::Tool::ImageGeneration::background, + input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?, input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask, model: OpenAI::Models::Responses::Tool::ImageGeneration::model, moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation, @@ -232,6 +233,8 @@ module OpenAI OpenAI::Models::Responses::Tool::ImageGeneration::background ) -> OpenAI::Models::Responses::Tool::ImageGeneration::background + attr_accessor input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity? + attr_reader input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask? 
def input_image_mask=: ( @@ -278,6 +281,7 @@ module OpenAI def initialize: ( ?background: OpenAI::Models::Responses::Tool::ImageGeneration::background, + ?input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?, ?input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask, ?model: OpenAI::Models::Responses::Tool::ImageGeneration::model, ?moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation, @@ -292,6 +296,7 @@ module OpenAI def to_hash: -> { type: :image_generation, background: OpenAI::Models::Responses::Tool::ImageGeneration::background, + input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?, input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask, model: OpenAI::Models::Responses::Tool::ImageGeneration::model, moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation, @@ -314,6 +319,17 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::background] end + type input_fidelity = :high | :low + + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH: :high + LOW: :low + + def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity] + end + type input_image_mask = { file_id: String, image_url: String } class InputImageMask < OpenAI::Internal::Type::BaseModel diff --git a/sig/openai/resources/images.rbs b/sig/openai/resources/images.rbs index 43595baa..bd5dfbcf 100644 --- a/sig/openai/resources/images.rbs +++ b/sig/openai/resources/images.rbs @@ -15,11 +15,13 @@ module OpenAI image: OpenAI::Models::ImageEditParams::image, prompt: String, ?background: OpenAI::Models::ImageEditParams::background?, + ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, ?mask: OpenAI::Internal::file_input, ?model: OpenAI::Models::ImageEditParams::model?, ?n: Integer?, ?output_compression: Integer?, ?output_format: OpenAI::Models::ImageEditParams::output_format?, + ?partial_images: Integer?, ?quality: OpenAI::Models::ImageEditParams::quality?, ?response_format: OpenAI::Models::ImageEditParams::response_format?, ?size: OpenAI::Models::ImageEditParams::size?, @@ -27,6 +29,24 @@ module OpenAI ?request_options: OpenAI::request_opts ) -> OpenAI::ImagesResponse + def edit_stream_raw: ( + image: OpenAI::Models::ImageEditParams::image, + prompt: String, + ?background: OpenAI::Models::ImageEditParams::background?, + ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, + ?mask: OpenAI::Internal::file_input, + ?model: OpenAI::Models::ImageEditParams::model?, + ?n: Integer?, + ?output_compression: Integer?, + ?output_format: OpenAI::Models::ImageEditParams::output_format?, + ?partial_images: Integer?, + ?quality: OpenAI::Models::ImageEditParams::quality?, + ?response_format: OpenAI::Models::ImageEditParams::response_format?, + ?size: OpenAI::Models::ImageEditParams::size?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Stream[OpenAI::Models::image_edit_stream_event] + def generate: ( prompt: String, ?background: OpenAI::Models::ImageGenerateParams::background?, @@ -35,6 +55,7 @@ module OpenAI ?n: Integer?, ?output_compression: Integer?, ?output_format: OpenAI::Models::ImageGenerateParams::output_format?, + ?partial_images: Integer?, ?quality: OpenAI::Models::ImageGenerateParams::quality?, ?response_format: OpenAI::Models::ImageGenerateParams::response_format?, ?size: OpenAI::Models::ImageGenerateParams::size?, @@ -43,6 +64,23 @@ module OpenAI 
?request_options: OpenAI::request_opts ) -> OpenAI::ImagesResponse + def generate_stream_raw: ( + prompt: String, + ?background: OpenAI::Models::ImageGenerateParams::background?, + ?model: OpenAI::Models::ImageGenerateParams::model?, + ?moderation: OpenAI::Models::ImageGenerateParams::moderation?, + ?n: Integer?, + ?output_compression: Integer?, + ?output_format: OpenAI::Models::ImageGenerateParams::output_format?, + ?partial_images: Integer?, + ?quality: OpenAI::Models::ImageGenerateParams::quality?, + ?response_format: OpenAI::Models::ImageGenerateParams::response_format?, + ?size: OpenAI::Models::ImageGenerateParams::size?, + ?style: OpenAI::Models::ImageGenerateParams::style?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Stream[OpenAI::Models::image_gen_stream_event] + def initialize: (client: OpenAI::Client) -> void end end
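
For quick reference, below is a minimal usage sketch of the new image-edit streaming surface described by the signatures above. The input path `otter.png`, the prompt, and the output filenames are illustrative placeholders; `edit_stream_raw`, `input_fidelity`, `partial_images`, and the event classes all come from this changeset.

```ruby
#!/usr/bin/env ruby
# frozen_string_literal: true

require "openai"
require "base64"
require "pathname"

client = OpenAI::Client.new

# Stream an image edit; `partial_images` requests intermediate frames and
# `input_fidelity: :high` asks the model to stay closer to the input image.
stream = client.images.edit_stream_raw(
  image: Pathname("otter.png"),  # placeholder input file
  prompt: "Add a red scarf",     # placeholder prompt
  model: "gpt-image-1",
  input_fidelity: :high,
  partial_images: 2
)

stream.each do |event|
  case event
  when OpenAI::Models::ImageEditPartialImageEvent
    # Partial frames arrive first, indexed from 0 up to `partial_images - 1`.
    File.write("partial_#{event.partial_image_index}.png", Base64.decode64(event.b64_json))
  when OpenAI::Models::ImageEditCompletedEvent
    # The completed event carries the final image plus token usage.
    File.write("edited.png", Base64.decode64(event.b64_json))
    puts "Done (#{event.usage.total_tokens} tokens)"
  end
end
```

The same pattern applies to `generate_stream_raw`, whose signature appears directly above and whose events are `ImageGenPartialImageEvent` and `ImageGenCompletedEvent`.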