diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c9da8cc1..66788158 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.23.2" + ".": "0.23.3" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index e3897189..905a02c4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 118 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-94b1e3cb0bdc616ff0c2f267c33dadd95f133b1f64e647aab6c64afb292b2793.yml -openapi_spec_hash: 2395319ac9befd59b6536ae7f9564a05 -config_hash: 930dac3aa861344867e4ac84f037b5df +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d30ff992a48873c1466c49f3c01f2ec8933faebff23424748f8d056065b1bcef.yml +openapi_spec_hash: e933ec43b46f45c348adb78840e5808d +config_hash: bf45940f0a7805b4ec2017eecdd36893 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6df92bf4..7d858f1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.23.3 (2025-09-15) + +Full Changelog: [v0.23.2...v0.23.3](https://github.com/openai/openai-ruby/compare/v0.23.2...v0.23.3) + +### Chores + +* **api:** docs and spec refactoring ([81ccb86](https://github.com/openai/openai-ruby/commit/81ccb86c346e51a2b5d532a5997358aa86977572)) + ## 0.23.2 (2025-09-11) Full Changelog: [v0.23.1...v0.23.2](https://github.com/openai/openai-ruby/compare/v0.23.1...v0.23.2) diff --git a/Gemfile.lock b/Gemfile.lock index 04b60951..5da34d73 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.23.2) + openai (0.23.3) connection_pool GEM diff --git a/README.md b/README.md index 9a262951..916935e8 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.23.2" +gem "openai", "~> 0.23.3" ``` diff --git a/lib/openai/models/chat/completion_list_params.rb b/lib/openai/models/chat/completion_list_params.rb index 0e18202c..62c0b42f 100644 --- a/lib/openai/models/chat/completion_list_params.rb +++ b/lib/openai/models/chat/completion_list_params.rb @@ -21,9 +21,12 @@ class CompletionListParams < OpenAI::Internal::Type::BaseModel optional :limit, Integer # @!attribute metadata - # A list of metadata keys to filter the Chat Completions by. Example: + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # `metadata[key1]=value1&metadata[key2]=value2` + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -49,7 +52,7 @@ class CompletionListParams < OpenAI::Internal::Type::BaseModel # # @param limit [Integer] Number of Chat Completions to retrieve. # - # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example: + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param model [String] The model used to generate the Chat Completions. 
# diff --git a/lib/openai/models/conversations/conversation_create_params.rb b/lib/openai/models/conversations/conversation_create_params.rb index 58cc4ba9..a56542d5 100644 --- a/lib/openai/models/conversations/conversation_create_params.rb +++ b/lib/openai/models/conversations/conversation_create_params.rb @@ -18,8 +18,12 @@ class ConversationCreateParams < OpenAI::Internal::Type::BaseModel nil?: true # @!attribute metadata - # Set of 16 key-value pairs that can be attached to an object. Useful for storing - # additional information about the object in a structured format. + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true @@ -30,7 +34,7 @@ class ConversationCreateParams < OpenAI::Internal::Type::BaseModel # # @param items [Array, nil] Initial items to include in the conversation context. # - # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end diff --git a/lib/openai/models/evals/run_cancel_response.rb b/lib/openai/models/evals/run_cancel_response.rb index 83d62131..4c97c294 100644 --- a/lib/openai/models/evals/run_cancel_response.rb +++ b/lib/openai/models/evals/run_cancel_response.rb @@ -314,8 +314,11 @@ class Responses < OpenAI::Internal::Type::BaseModel optional :model, String, nil?: true # @!attribute reasoning_effort - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -361,7 +364,7 @@ class Responses < OpenAI::Internal::Type::BaseModel # # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. # diff --git a/lib/openai/models/evals/run_create_params.rb b/lib/openai/models/evals/run_create_params.rb index 90cc0be5..04477bef 100644 --- a/lib/openai/models/evals/run_create_params.rb +++ b/lib/openai/models/evals/run_create_params.rb @@ -226,8 +226,11 @@ class Responses < OpenAI::Internal::Type::BaseModel optional :model, String, nil?: true # @!attribute reasoning_effort - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. 
+ # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -273,7 +276,7 @@ class Responses < OpenAI::Internal::Type::BaseModel # # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. # diff --git a/lib/openai/models/evals/run_create_response.rb b/lib/openai/models/evals/run_create_response.rb index a12f2f48..9e7dce76 100644 --- a/lib/openai/models/evals/run_create_response.rb +++ b/lib/openai/models/evals/run_create_response.rb @@ -314,8 +314,11 @@ class Responses < OpenAI::Internal::Type::BaseModel optional :model, String, nil?: true # @!attribute reasoning_effort - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -361,7 +364,7 @@ class Responses < OpenAI::Internal::Type::BaseModel # # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. # diff --git a/lib/openai/models/evals/run_list_response.rb b/lib/openai/models/evals/run_list_response.rb index 869f0049..a7082381 100644 --- a/lib/openai/models/evals/run_list_response.rb +++ b/lib/openai/models/evals/run_list_response.rb @@ -314,8 +314,11 @@ class Responses < OpenAI::Internal::Type::BaseModel optional :model, String, nil?: true # @!attribute reasoning_effort - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
# # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -361,7 +364,7 @@ class Responses < OpenAI::Internal::Type::BaseModel # # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. # diff --git a/lib/openai/models/evals/run_retrieve_response.rb b/lib/openai/models/evals/run_retrieve_response.rb index 411a3668..f55b5ef0 100644 --- a/lib/openai/models/evals/run_retrieve_response.rb +++ b/lib/openai/models/evals/run_retrieve_response.rb @@ -314,8 +314,11 @@ class Responses < OpenAI::Internal::Type::BaseModel optional :model, String, nil?: true # @!attribute reasoning_effort - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -361,7 +364,7 @@ class Responses < OpenAI::Internal::Type::BaseModel # # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. # diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index a3518b48..0f69e130 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -387,7 +387,7 @@ def update(completion_id, params) # # @param limit [Integer] Number of Chat Completions to retrieve. # - # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example: + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param model [String] The model used to generate the Chat Completions. # diff --git a/lib/openai/resources/conversations.rb b/lib/openai/resources/conversations.rb index c88b2deb..fae70832 100644 --- a/lib/openai/resources/conversations.rb +++ b/lib/openai/resources/conversations.rb @@ -15,7 +15,7 @@ class Conversations # # @param items [Array, nil] Initial items to include in the conversation context. # - # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # diff --git a/lib/openai/version.rb b/lib/openai/version.rb index fbf5600a..2a63a5d1 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.23.2" + VERSION = "0.23.3" end diff --git a/rbi/openai/models/chat/completion_list_params.rbi b/rbi/openai/models/chat/completion_list_params.rbi index cb92d858..2dd114dd 100644 --- a/rbi/openai/models/chat/completion_list_params.rbi +++ b/rbi/openai/models/chat/completion_list_params.rbi @@ -26,9 +26,12 @@ module OpenAI sig { params(limit: Integer).void } attr_writer :limit - # A list of metadata keys to filter the Chat Completions by. Example: + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # `metadata[key1]=value1&metadata[key2]=value2` + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -70,9 +73,12 @@ module OpenAI after: nil, # Number of Chat Completions to retrieve. limit: nil, - # A list of metadata keys to filter the Chat Completions by. Example: + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # `metadata[key1]=value1&metadata[key2]=value2` + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The model used to generate the Chat Completions. model: nil, diff --git a/rbi/openai/models/conversations/conversation_create_params.rbi b/rbi/openai/models/conversations/conversation_create_params.rbi index 72463e9b..cf8283d6 100644 --- a/rbi/openai/models/conversations/conversation_create_params.rbi +++ b/rbi/openai/models/conversations/conversation_create_params.rbi @@ -50,8 +50,12 @@ module OpenAI end attr_accessor :items - # Set of 16 key-value pairs that can be attached to an object. Useful for storing - # additional information about the object in a structured format. + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(T::Hash[Symbol, String])) } attr_accessor :metadata @@ -93,8 +97,12 @@ module OpenAI # Initial items to include in the conversation context. You may add up to 20 items # at a time. items: nil, - # Set of 16 key-value pairs that can be attached to an object. Useful for storing - # additional information about the object in a structured format. + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
metadata: nil, request_options: {} ) diff --git a/rbi/openai/models/evals/run_cancel_response.rbi b/rbi/openai/models/evals/run_cancel_response.rbi index bcc7465e..cb502fe8 100644 --- a/rbi/openai/models/evals/run_cancel_response.rbi +++ b/rbi/openai/models/evals/run_cancel_response.rbi @@ -510,8 +510,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :model - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -566,8 +569,11 @@ module OpenAI # The name of the model to find responses for. This is a query parameter used to # select responses. model: nil, - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, diff --git a/rbi/openai/models/evals/run_create_params.rbi b/rbi/openai/models/evals/run_create_params.rbi index 122c2926..52dead66 100644 --- a/rbi/openai/models/evals/run_create_params.rbi +++ b/rbi/openai/models/evals/run_create_params.rbi @@ -420,8 +420,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :model - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -474,8 +477,11 @@ module OpenAI # The name of the model to find responses for. This is a query parameter used to # select responses. model: nil, - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, diff --git a/rbi/openai/models/evals/run_create_response.rbi b/rbi/openai/models/evals/run_create_response.rbi index 4b547fc6..1a8def7e 100644 --- a/rbi/openai/models/evals/run_create_response.rbi +++ b/rbi/openai/models/evals/run_create_response.rbi @@ -510,8 +510,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :model - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. 
+ # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -566,8 +569,11 @@ module OpenAI # The name of the model to find responses for. This is a query parameter used to # select responses. model: nil, - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, diff --git a/rbi/openai/models/evals/run_list_response.rbi b/rbi/openai/models/evals/run_list_response.rbi index d259c146..54400783 100644 --- a/rbi/openai/models/evals/run_list_response.rbi +++ b/rbi/openai/models/evals/run_list_response.rbi @@ -506,8 +506,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :model - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -562,8 +565,11 @@ module OpenAI # The name of the model to find responses for. This is a query parameter used to # select responses. model: nil, - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, diff --git a/rbi/openai/models/evals/run_retrieve_response.rbi b/rbi/openai/models/evals/run_retrieve_response.rbi index b5dc012b..c270c1d0 100644 --- a/rbi/openai/models/evals/run_retrieve_response.rbi +++ b/rbi/openai/models/evals/run_retrieve_response.rbi @@ -512,8 +512,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :model - # Optional reasoning effort parameter. This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig do returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) end @@ -568,8 +571,11 @@ module OpenAI # The name of the model to find responses for. This is a query parameter used to # select responses. model: nil, - # Optional reasoning effort parameter. 
This is a query parameter used to select - # responses. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Sampling temperature. This is a query parameter used to select responses. temperature: nil, diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi index f65a22d6..43ec1239 100644 --- a/rbi/openai/resources/chat/completions.rbi +++ b/rbi/openai/resources/chat/completions.rbi @@ -699,9 +699,12 @@ module OpenAI after: nil, # Number of Chat Completions to retrieve. limit: nil, - # A list of metadata keys to filter the Chat Completions by. Example: + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # `metadata[key1]=value1&metadata[key2]=value2` + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, # The model used to generate the Chat Completions. model: nil, diff --git a/rbi/openai/resources/conversations.rbi b/rbi/openai/resources/conversations.rbi index ac1427e1..917988e1 100644 --- a/rbi/openai/resources/conversations.rbi +++ b/rbi/openai/resources/conversations.rbi @@ -45,8 +45,12 @@ module OpenAI # Initial items to include in the conversation context. You may add up to 20 items # at a time. items: nil, - # Set of 16 key-value pairs that can be attached to an object. Useful for storing - # additional information about the object in a structured format. + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, request_options: {} )
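
The hunks above are documentation-only: they re-describe the `metadata` parameter on Chat Completion listing and Conversation creation, and the `reasoning_effort` field on the eval-run response filters, without changing any behavior. For reference, below is a minimal usage sketch of the re-documented `metadata` parameters through this SDK. The metadata keys/values, the `ENV` variable, and the use of `auto_paging_each` for iteration are illustrative assumptions, not part of this diff.

```ruby
# Minimal sketch (assumptions noted above); not part of the diff itself.
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Create a conversation tagged with metadata: up to 16 key-value pairs,
# keys of at most 64 characters and values of at most 512 characters,
# per the updated parameter docs above.
conversation = openai.conversations.create(
  metadata: { project: "docs-refactor", environment: "staging" }
)
puts conversation.id

# List stored Chat Completions filtered by the same metadata keys,
# matching the updated CompletionListParams documentation.
page = openai.chat.completions.list(
  limit: 10,
  metadata: { environment: "staging" }
)

# Iterate across pages (assumes the SDK's cursor-page helper).
page.auto_paging_each do |completion|
  puts completion.id
end
```

The `reasoning_effort` rewording only touches the eval-run response filters under `lib/openai/models/evals/*` and `rbi/openai/models/evals/*`, so no call-site changes are needed for existing eval-run code.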