Skip to content

Commit 15e5f60

Browse files
fix(client): fix verbosity parameter location in Responses
Fixes an error caused by the unsupported top-level `verbosity` parameter by correctly placing it inside the `text` parameter.
1 parent f8c5dbf commit 15e5f60

File tree

14 files changed

+126
-240
lines changed

14 files changed

+126
-240
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 109
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-56d3a72a5caa187aebcf9de169a6a28a9dc3f70a79d7467a03a9e22595936066.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml
33
openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
4-
config_hash: 7e18239879286d68a48ac5487a649aa6
4+
config_hash: a67c5e195a59855fe8a5db0dc61a3e7f

lib/openai/models/responses/response.rb

Lines changed: 1 addition & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -297,15 +297,7 @@ def output_text
297297
texts.join
298298
end
299299

300-
# @!attribute verbosity
301-
# Constrains the verbosity of the model's response. Lower values will result in
302-
# more concise responses, while higher values will result in more verbose
303-
# responses. Currently supported values are `low`, `medium`, and `high`.
304-
#
305-
# @return [Symbol, OpenAI::Models::Responses::Response::Verbosity, nil]
306-
optional :verbosity, enum: -> { OpenAI::Responses::Response::Verbosity }, nil?: true
307-
308-
# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, verbosity: nil, object: :response)
300+
# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
309301
# Some parameter documentations has been truncated, see
310302
# {OpenAI::Models::Responses::Response} for more details.
311303
#
@@ -365,8 +357,6 @@ def output_text
365357
#
366358
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
367359
#
368-
# @param verbosity [Symbol, OpenAI::Models::Responses::Response::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
369-
#
370360
# @param object [Symbol, :response] The object type of this resource - always set to `response`.
371361

372362
# @see OpenAI::Models::Responses::Response#incomplete_details
@@ -510,22 +500,6 @@ module Truncation
510500
# @!method self.values
511501
# @return [Array<Symbol>]
512502
end
513-
514-
# Constrains the verbosity of the model's response. Lower values will result in
515-
# more concise responses, while higher values will result in more verbose
516-
# responses. Currently supported values are `low`, `medium`, and `high`.
517-
#
518-
# @see OpenAI::Models::Responses::Response#verbosity
519-
module Verbosity
520-
extend OpenAI::Internal::Type::Enum
521-
522-
LOW = :low
523-
MEDIUM = :medium
524-
HIGH = :high
525-
526-
# @!method self.values
527-
# @return [Array<Symbol>]
528-
end
529503
end
530504
end
531505
end

lib/openai/models/responses/response_create_params.rb

Lines changed: 1 addition & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -279,15 +279,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
279279
# @return [String, nil]
280280
optional :user, String
281281

282-
# @!attribute verbosity
283-
# Constrains the verbosity of the model's response. Lower values will result in
284-
# more concise responses, while higher values will result in more verbose
285-
# responses. Currently supported values are `low`, `medium`, and `high`.
286-
#
287-
# @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Verbosity, nil]
288-
optional :verbosity, enum: -> { OpenAI::Responses::ResponseCreateParams::Verbosity }, nil?: true
289-
290-
# @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, verbosity: nil, request_options: {})
282+
# @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
291283
# Some parameter documentations has been truncated, see
292284
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
293285
#
@@ -341,8 +333,6 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
341333
#
342334
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
343335
#
344-
# @param verbosity [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
345-
#
346336
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
347337

348338
# Text, image, or file inputs to the model, used to generate a response.
@@ -473,20 +463,6 @@ module Truncation
473463
# @!method self.values
474464
# @return [Array<Symbol>]
475465
end
476-
477-
# Constrains the verbosity of the model's response. Lower values will result in
478-
# more concise responses, while higher values will result in more verbose
479-
# responses. Currently supported values are `low`, `medium`, and `high`.
480-
module Verbosity
481-
extend OpenAI::Internal::Type::Enum
482-
483-
LOW = :low
484-
MEDIUM = :medium
485-
HIGH = :high
486-
487-
# @!method self.values
488-
# @return [Array<Symbol>]
489-
end
490466
end
491467
end
492468
end

lib/openai/models/responses/response_text_config.rb

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,15 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel
2222
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
2323
optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format
2424

25-
# @!method initialize(format_: nil)
25+
# @!attribute verbosity
26+
# Constrains the verbosity of the model's response. Lower values will result in
27+
# more concise responses, while higher values will result in more verbose
28+
# responses. Currently supported values are `low`, `medium`, and `high`.
29+
#
30+
# @return [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil]
31+
optional :verbosity, enum: -> { OpenAI::Responses::ResponseTextConfig::Verbosity }, nil?: true
32+
33+
# @!method initialize(format_: nil, verbosity: nil)
2634
# Some parameter documentations has been truncated, see
2735
# {OpenAI::Models::Responses::ResponseTextConfig} for more details.
2836
#
@@ -33,6 +41,24 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel
3341
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
3442
#
3543
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
44+
#
45+
# @param verbosity [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
46+
47+
# Constrains the verbosity of the model's response. Lower values will result in
48+
# more concise responses, while higher values will result in more verbose
49+
# responses. Currently supported values are `low`, `medium`, and `high`.
50+
#
51+
# @see OpenAI::Models::Responses::ResponseTextConfig#verbosity
52+
module Verbosity
53+
extend OpenAI::Internal::Type::Enum
54+
55+
LOW = :low
56+
MEDIUM = :medium
57+
HIGH = :high
58+
59+
# @!method self.values
60+
# @return [Array<Symbol>]
61+
end
3662
end
3763
end
3864
end

lib/openai/resources/responses.rb

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ class Responses
2323
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
2424
# your own data as input for the model's response.
2525
#
26-
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, verbosity: nil, request_options: {})
26+
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
2727
#
2828
# @param background [Boolean, nil] Whether to run the model response in the background.
2929
#
@@ -75,8 +75,6 @@ class Responses
7575
#
7676
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
7777
#
78-
# @param verbosity [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
79-
#
8078
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
8179
#
8280
# @return [OpenAI::Models::Responses::Response]
@@ -122,11 +120,7 @@ def create(params = {})
122120
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
123121
# your own data as input for the model's response.
124122
#
125-
# @overload stream(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, verbosity: nil, request_options: {})
126-
#
127-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
128-
#
129-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
123+
# @overload stream(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
130124
#
131125
# @param background [Boolean, nil] Whether to run the model response in the background.
132126
#
@@ -298,8 +292,6 @@ def stream(params)
298292
#
299293
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
300294
#
301-
# @param verbosity [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
302-
#
303295
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
304296
#
305297
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, 
OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent>]

rbi/openai/models/responses/response.rbi

Lines changed: 1 addition & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -280,16 +280,6 @@ module OpenAI
280280
sig { params(user: String).void }
281281
attr_writer :user
282282

283-
# Constrains the verbosity of the model's response. Lower values will result in
284-
# more concise responses, while higher values will result in more verbose
285-
# responses. Currently supported values are `low`, `medium`, and `high`.
286-
sig do
287-
returns(
288-
T.nilable(OpenAI::Responses::Response::Verbosity::TaggedSymbol)
289-
)
290-
end
291-
attr_accessor :verbosity
292-
293283
sig do
294284
params(
295285
id: String,
@@ -367,8 +357,6 @@ module OpenAI
367357
T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol),
368358
usage: OpenAI::Responses::ResponseUsage::OrHash,
369359
user: String,
370-
verbosity:
371-
T.nilable(OpenAI::Responses::Response::Verbosity::OrSymbol),
372360
object: Symbol
373361
).returns(T.attached_class)
374362
end
@@ -523,10 +511,6 @@ module OpenAI
523511
# similar requests and to help OpenAI detect and prevent abuse.
524512
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
525513
user: nil,
526-
# Constrains the verbosity of the model's response. Lower values will result in
527-
# more concise responses, while higher values will result in more verbose
528-
# responses. Currently supported values are `low`, `medium`, and `high`.
529-
verbosity: nil,
530514
# The object type of this resource - always set to `response`.
531515
object: :response
532516
)
@@ -571,9 +555,7 @@ module OpenAI
571555
OpenAI::Responses::Response::Truncation::TaggedSymbol
572556
),
573557
usage: OpenAI::Responses::ResponseUsage,
574-
user: String,
575-
verbosity:
576-
T.nilable(OpenAI::Responses::Response::Verbosity::TaggedSymbol)
558+
user: String
577559
}
578560
)
579561
end
@@ -814,34 +796,6 @@ module OpenAI
814796
def self.values
815797
end
816798
end
817-
818-
# Constrains the verbosity of the model's response. Lower values will result in
819-
# more concise responses, while higher values will result in more verbose
820-
# responses. Currently supported values are `low`, `medium`, and `high`.
821-
module Verbosity
822-
extend OpenAI::Internal::Type::Enum
823-
824-
TaggedSymbol =
825-
T.type_alias do
826-
T.all(Symbol, OpenAI::Responses::Response::Verbosity)
827-
end
828-
OrSymbol = T.type_alias { T.any(Symbol, String) }
829-
830-
LOW =
831-
T.let(:low, OpenAI::Responses::Response::Verbosity::TaggedSymbol)
832-
MEDIUM =
833-
T.let(:medium, OpenAI::Responses::Response::Verbosity::TaggedSymbol)
834-
HIGH =
835-
T.let(:high, OpenAI::Responses::Response::Verbosity::TaggedSymbol)
836-
837-
sig do
838-
override.returns(
839-
T::Array[OpenAI::Responses::Response::Verbosity::TaggedSymbol]
840-
)
841-
end
842-
def self.values
843-
end
844-
end
845799
end
846800
end
847801
end

0 commit comments

Comments
 (0)