Skip to content

Commit ae30f94

Browse files
chore(api): accurately represent shape for verbosity on Chat Completions
1 parent 66091b6 commit ae30f94

File tree

20 files changed

+98
-731
lines changed

20 files changed

+98
-731
lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 109
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml
3-
openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15
4-
config_hash: ed87b9139ac595a04a2162d754df2fed
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml
3+
openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063
4+
config_hash: 76afa3236f36854a8705f1281b1990b8

lib/openai/models/chat/completion_create_params.rb

Lines changed: 2 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -272,7 +272,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
272272
# our [model distillation](https://platform.openai.com/docs/guides/distillation)
273273
# or [evals](https://platform.openai.com/docs/guides/evals) products.
274274
#
275-
# Supports text and image inputs. Note: image inputs over 10MB will be dropped.
275+
# Supports text and image inputs. Note: image inputs over 8MB will be dropped.
276276
#
277277
# @return [Boolean, nil]
278278
optional :store, OpenAI::Internal::Type::Boolean, nil?: true
@@ -292,11 +292,6 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
292292
# @return [Float, nil]
293293
optional :temperature, Float, nil?: true
294294

295-
# @!attribute text
296-
#
297-
# @return [OpenAI::Models::Chat::CompletionCreateParams::Text, nil]
298-
optional :text, -> { OpenAI::Chat::CompletionCreateParams::Text }
299-
300295
# @!attribute tool_choice
301296
# Controls which (if any) tool is called by the model. `none` means the model will
302297
# not call any tool and instead generates a message. `auto` means the model can
@@ -370,7 +365,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
370365
# @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
371366
optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
372367

373-
# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
368+
# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
374369
# Some parameter documentations has been truncated, see
375370
# {OpenAI::Models::Chat::CompletionCreateParams} for more details.
376371
#
@@ -426,8 +421,6 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
426421
#
427422
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
428423
#
429-
# @param text [OpenAI::Models::Chat::CompletionCreateParams::Text]
430-
#
431424
# @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
432425
#
433426
# @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
@@ -638,38 +631,6 @@ module Stop
638631
StringArray = OpenAI::Internal::Type::ArrayOf[String]
639632
end
640633

641-
class Text < OpenAI::Internal::Type::BaseModel
642-
# @!attribute verbosity
643-
# Constrains the verbosity of the model's response. Lower values will result in
644-
# more concise responses, while higher values will result in more verbose
645-
# responses. Currently supported values are `low`, `medium`, and `high`.
646-
#
647-
# @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil]
648-
optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Text::Verbosity }, nil?: true
649-
650-
# @!method initialize(verbosity: nil)
651-
# Some parameter documentations has been truncated, see
652-
# {OpenAI::Models::Chat::CompletionCreateParams::Text} for more details.
653-
#
654-
# @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
655-
656-
# Constrains the verbosity of the model's response. Lower values will result in
657-
# more concise responses, while higher values will result in more verbose
658-
# responses. Currently supported values are `low`, `medium`, and `high`.
659-
#
660-
# @see OpenAI::Models::Chat::CompletionCreateParams::Text#verbosity
661-
module Verbosity
662-
extend OpenAI::Internal::Type::Enum
663-
664-
LOW = :low
665-
MEDIUM = :medium
666-
HIGH = :high
667-
668-
# @!method self.values
669-
# @return [Array<Symbol>]
670-
end
671-
end
672-
673634
# Constrains the verbosity of the model's response. Lower values will result in
674635
# more concise responses, while higher values will result in more verbose
675636
# responses. Currently supported values are `low`, `medium`, and `high`.

lib/openai/models/graders/text_similarity_grader.rb

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@ module Models
55
module Graders
66
class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel
77
# @!attribute evaluation_metric
8-
# The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
9-
# `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
8+
# The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`,
9+
# `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
1010
#
1111
# @return [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric]
1212
required :evaluation_metric, enum: -> { OpenAI::Graders::TextSimilarityGrader::EvaluationMetric }
@@ -41,7 +41,7 @@ class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel
4141
#
4242
# A TextSimilarityGrader object which grades text based on similarity metrics.
4343
#
44-
# @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `r
44+
# @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`,
4545
#
4646
# @param input [String] The text being graded.
4747
#
@@ -51,13 +51,14 @@ class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel
5151
#
5252
# @param type [Symbol, :text_similarity] The type of grader.
5353

54-
# The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
55-
# `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
54+
# The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`,
55+
# `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
5656
#
5757
# @see OpenAI::Models::Graders::TextSimilarityGrader#evaluation_metric
5858
module EvaluationMetric
5959
extend OpenAI::Internal::Type::Enum
6060

61+
COSINE = :cosine
6162
FUZZY_MATCH = :fuzzy_match
6263
BLEU = :bleu
6364
GLEU = :gleu

lib/openai/models/responses/response.rb

Lines changed: 8 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -229,9 +229,14 @@ class Response < OpenAI::Internal::Type::BaseModel
229229
optional :status, enum: -> { OpenAI::Responses::ResponseStatus }
230230

231231
# @!attribute text
232+
# Configuration options for a text response from the model. Can be plain text or
233+
# structured JSON data. Learn more:
232234
#
233-
# @return [OpenAI::Models::Responses::Response::Text, nil]
234-
optional :text, -> { OpenAI::Responses::Response::Text }
235+
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
236+
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
237+
#
238+
# @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
239+
optional :text, -> { OpenAI::Responses::ResponseTextConfig }
235240

236241
# @!attribute top_logprobs
237242
# An integer between 0 and 20 specifying the number of most likely tokens to
@@ -341,7 +346,7 @@ def output_text
341346
#
342347
# @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
343348
#
344-
# @param text [OpenAI::Models::Responses::Response::Text]
349+
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
345350
#
346351
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
347352
#
@@ -475,59 +480,6 @@ module ServiceTier
475480
# @return [Array<Symbol>]
476481
end
477482

478-
# @see OpenAI::Models::Responses::Response#text
479-
class Text < OpenAI::Internal::Type::BaseModel
480-
# @!attribute format_
481-
# An object specifying the format that the model must output.
482-
#
483-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
484-
# ensures the model will match your supplied JSON schema. Learn more in the
485-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
486-
#
487-
# The default format is `{ "type": "text" }` with no additional options.
488-
#
489-
# **Not recommended for gpt-4o and newer models:**
490-
#
491-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
492-
# ensures the message the model generates is valid JSON. Using `json_schema` is
493-
# preferred for models that support it.
494-
#
495-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
496-
optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format
497-
498-
# @!attribute verbosity
499-
# Constrains the verbosity of the model's response. Lower values will result in
500-
# more concise responses, while higher values will result in more verbose
501-
# responses. Currently supported values are `low`, `medium`, and `high`.
502-
#
503-
# @return [Symbol, OpenAI::Models::Responses::Response::Text::Verbosity, nil]
504-
optional :verbosity, enum: -> { OpenAI::Responses::Response::Text::Verbosity }, nil?: true
505-
506-
# @!method initialize(format_: nil, verbosity: nil)
507-
# Some parameter documentations has been truncated, see
508-
# {OpenAI::Models::Responses::Response::Text} for more details.
509-
#
510-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
511-
#
512-
# @param verbosity [Symbol, OpenAI::Models::Responses::Response::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
513-
514-
# Constrains the verbosity of the model's response. Lower values will result in
515-
# more concise responses, while higher values will result in more verbose
516-
# responses. Currently supported values are `low`, `medium`, and `high`.
517-
#
518-
# @see OpenAI::Models::Responses::Response::Text#verbosity
519-
module Verbosity
520-
extend OpenAI::Internal::Type::Enum
521-
522-
LOW = :low
523-
MEDIUM = :medium
524-
HIGH = :high
525-
526-
# @!method self.values
527-
# @return [Array<Symbol>]
528-
end
529-
end
530-
531483
# The truncation strategy to use for the model response.
532484
#
533485
# - `auto`: If the context of this response and previous ones exceeds the model's

lib/openai/models/responses/response_create_params.rb

Lines changed: 3 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -193,6 +193,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
193193
optional :temperature, Float, nil?: true
194194

195195
# @!attribute text
196+
# Configuration options for a text response from the model. Can be plain text or
197+
# structured JSON data. Learn more:
196198
#
197199
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
198200
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
@@ -316,7 +318,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
316318
#
317319
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
318320
#
319-
# @param text [OpenAI::Models::Responses::ResponseCreateParams::Text]
321+
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
320322
#
321323
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
322324
#
@@ -407,62 +409,6 @@ class StreamOptions < OpenAI::Internal::Type::BaseModel
407409
# @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds
408410
end
409411

410-
class Text < OpenAI::Internal::Type::BaseModel
411-
# @!attribute format_
412-
# An object specifying the format that the model must output.
413-
#
414-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
415-
# ensures the model will match your supplied JSON schema. Learn more in the
416-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
417-
#
418-
# The default format is `{ "type": "text" }` with no additional options.
419-
#
420-
# **Not recommended for gpt-4o and newer models:**
421-
#
422-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
423-
# ensures the message the model generates is valid JSON. Using `json_schema` is
424-
# preferred for models that support it.
425-
#
426-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
427-
optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format
428-
429-
# @!attribute verbosity
430-
# Constrains the verbosity of the model's response. Lower values will result in
431-
# more concise responses, while higher values will result in more verbose
432-
# responses. Currently supported values are `low`, `medium`, and `high`.
433-
#
434-
# @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Text::Verbosity, nil]
435-
optional :verbosity,
436-
enum: -> {
437-
OpenAI::Responses::ResponseCreateParams::Text::Verbosity
438-
},
439-
nil?: true
440-
441-
# @!method initialize(format_: nil, verbosity: nil)
442-
# Some parameter documentations has been truncated, see
443-
# {OpenAI::Models::Responses::ResponseCreateParams::Text} for more details.
444-
#
445-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
446-
#
447-
# @param verbosity [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Text::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
448-
449-
# Constrains the verbosity of the model's response. Lower values will result in
450-
# more concise responses, while higher values will result in more verbose
451-
# responses. Currently supported values are `low`, `medium`, and `high`.
452-
#
453-
# @see OpenAI::Models::Responses::ResponseCreateParams::Text#verbosity
454-
module Verbosity
455-
extend OpenAI::Internal::Type::Enum
456-
457-
LOW = :low
458-
MEDIUM = :medium
459-
HIGH = :high
460-
461-
# @!method self.values
462-
# @return [Array<Symbol>]
463-
end
464-
end
465-
466412
# How the model should select which tool (or tools) to use when generating a
467413
# response. See the `tools` parameter to see how to specify which tools the model
468414
# can call.

0 commit comments

Comments (0)