@@ -24,7 +24,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
2424 -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Chat::ChatCompletionMessageParam] }
2525
2626 # @!attribute model
27- # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
27+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
2828 # wide range of models with different capabilities, performance characteristics,
2929 # and price points. Refer to the
3030 # [model guide](https://platform.openai.com/docs/models) to browse and compare
@@ -121,7 +121,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
121121 #
122122 # This value is now deprecated in favor of `max_completion_tokens`, and is not
123123 # compatible with
124- # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
124+ # [o-series models](https://platform.openai.com/docs/guides/reasoning).
125125 #
126126 # @return [Integer, nil]
127127 optional :max_tokens, Integer, nil?: true
@@ -240,6 +240,9 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
240240 # latency guarantee.
241241 # - If set to 'default', the request will be processed using the default service
242242 # tier with a lower uptime SLA and no latency guarantee.
243+ # - If set to 'flex', the request will be processed with the Flex Processing
244+ # service tier.
245+ # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
243246 # - When not set, the default behavior is 'auto'.
244247 #
245248 # When this parameter is set, the response body will include the `service_tier`
@@ -249,6 +252,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
249252 optional :service_tier, enum: -> { OpenAI::Models::Chat::CompletionCreateParams::ServiceTier }, nil?: true
250253
251254 # @!attribute stop
255+ # Not supported with latest reasoning models `o3` and `o4-mini`.
256+ #
252257 # Up to 4 sequences where the API will stop generating further tokens. The
253258 # returned text will not contain the stop sequence.
254259 #
@@ -422,7 +427,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
422427
423428 # def initialize: (Hash | OpenAI::Internal::Type::BaseModel) -> void
424429
425- # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
430+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
426431 # wide range of models with different capabilities, performance characteristics,
427432 # and price points. Refer to the
428433 # [model guide](https://platform.openai.com/docs/models) to browse and compare
@@ -432,7 +437,7 @@ module Model
432437
433438 variant String
434439
435- # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
440+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
436441 # offers a wide range of models with different capabilities, performance
437442 # characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
438443 # to browse and compare available models.
@@ -589,6 +594,9 @@ module ResponseFormat
589594 # latency guarantee.
590595 # - If set to 'default', the request will be processed using the default service
591596 # tier with a lower uptime SLA and no latency guarantee.
597+ # - If set to 'flex', the request will be processed with the Flex Processing
598+ # service tier.
599+ # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
592600 # - When not set, the default behavior is 'auto'.
593601 #
594602 # When this parameter is set, the response body will include the `service_tier`
@@ -598,6 +606,7 @@ module ServiceTier
598606
599607 AUTO = :auto
600608 DEFAULT = :default
609+ FLEX = :flex
601610
602611 finalize!
603612
@@ -606,6 +615,8 @@ module ServiceTier
606615 # def self.values; end
607616 end
608617
618+ # Not supported with latest reasoning models `o3` and `o4-mini`.
619+ #
609620 # Up to 4 sequences where the API will stop generating further tokens. The
610621 # returned text will not contain the stop sequence.
611622 module Stop
0 commit comments