
Commit c4989d6

feat(api): manual updates
1 parent: 39d155f

117 files changed: 3,009 additions & 12,138 deletions

.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 99
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml
-openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a
-config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml
+openapi_spec_hash: 602e14add4bee018c6774e320ce309b8
+config_hash: bdacc55eb995c15255ec82130eb8c3bb

lib/openai.rb

Lines changed: 7 additions & 0 deletions
@@ -182,6 +182,7 @@
 require_relative "openai/models/beta/threads/text_delta_block"
 require_relative "openai/models/beta/thread_stream_event"
 require_relative "openai/models/beta/thread_update_params"
+require_relative "openai/models/beta/truncation_object"
 require_relative "openai/models/chat/chat_completion"
 require_relative "openai/models/chat/chat_completion_assistant_message_param"
 require_relative "openai/models/chat/chat_completion_audio"
@@ -234,13 +235,18 @@
 require_relative "openai/models/eval_custom_data_source_config"
 require_relative "openai/models/eval_delete_params"
 require_relative "openai/models/eval_delete_response"
+require_relative "openai/models/eval_item"
 require_relative "openai/models/eval_list_params"
 require_relative "openai/models/eval_list_response"
+require_relative "openai/models/eval_logs_data_source_config"
 require_relative "openai/models/eval_retrieve_params"
 require_relative "openai/models/eval_retrieve_response"
 require_relative "openai/models/evals/create_eval_completions_run_data_source"
 require_relative "openai/models/evals/create_eval_jsonl_run_data_source"
+require_relative "openai/models/evals/create_eval_responses_run_data_source"
 require_relative "openai/models/evals/eval_api_error"
+require_relative "openai/models/evals/eval_jsonl_file_content_source"
+require_relative "openai/models/evals/eval_jsonl_file_id_source"
 require_relative "openai/models/evals/run_cancel_params"
 require_relative "openai/models/evals/run_cancel_response"
 require_relative "openai/models/evals/run_create_params"
@@ -425,6 +431,7 @@
 require_relative "openai/models/vector_store_create_params"
 require_relative "openai/models/vector_store_deleted"
 require_relative "openai/models/vector_store_delete_params"
+require_relative "openai/models/vector_store_expiration_after"
 require_relative "openai/models/vector_store_list_params"
 require_relative "openai/models/vector_store_retrieve_params"
 require_relative "openai/models/vector_stores/file_batch_cancel_params"

lib/openai/models.rb

Lines changed: 6 additions & 0 deletions
@@ -86,8 +86,12 @@ module OpenAI
 
   EvalDeleteParams = OpenAI::Models::EvalDeleteParams
 
+  EvalItem = OpenAI::Models::EvalItem
+
   EvalListParams = OpenAI::Models::EvalListParams
 
+  EvalLogsDataSourceConfig = OpenAI::Models::EvalLogsDataSourceConfig
+
   EvalRetrieveParams = OpenAI::Models::EvalRetrieveParams
 
   Evals = OpenAI::Models::Evals
@@ -204,6 +208,8 @@ module OpenAI
 
   VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams
 
+  VectorStoreExpirationAfter = OpenAI::Models::VectorStoreExpirationAfter
+
   VectorStoreListParams = OpenAI::Models::VectorStoreListParams
 
   VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams

lib/openai/models/audio/transcription_create_params.rb

Lines changed: 94 additions & 1 deletion
@@ -25,6 +25,17 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
   # @return [String, Symbol, OpenAI::AudioModel]
   required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }
 
+  # @!attribute chunking_strategy
+  #   Controls how the audio is cut into chunks. When set to `"auto"`, the server
+  #   first normalizes loudness and then uses voice activity detection (VAD) to choose
+  #   boundaries. `server_vad` object can be provided to tweak VAD detection
+  #   parameters manually. If unset, the audio is transcribed as a single block.
+  #
+  #   @return [Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil]
+  optional :chunking_strategy,
+           union: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy },
+           nil?: true
+
   # @!attribute include
   #   Additional information to include in the transcription response. `logprobs` will
   #   return the log probabilities of the tokens in the response to understand the
@@ -83,14 +94,16 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
     OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity]
   }
 
-  # @!method initialize(file:, model:, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
+  # @!method initialize(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
   #   Some parameter documentations has been truncated, see
   #   {OpenAI::Models::Audio::TranscriptionCreateParams} for more details.
   #
   #   @param file [Pathname, StringIO, IO, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
   #
   #   @param model [String, Symbol, OpenAI::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
   #
+  #   @param chunking_strategy [Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
+  #
   #   @param include [Array<Symbol, OpenAI::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
   #
   #   @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
@@ -124,6 +137,86 @@ module Model
   end
 end
 
+# Controls how the audio is cut into chunks. When set to `"auto"`, the server
+# first normalizes loudness and then uses voice activity detection (VAD) to choose
+# boundaries. `server_vad` object can be provided to tweak VAD detection
+# parameters manually. If unset, the audio is transcribed as a single block.
+module ChunkingStrategy
+  extend OpenAI::Internal::Type::Union
+
+  # Automatically set chunking parameters based on the audio. Must be set to `"auto"`.
+  variant const: :auto
+
+  variant -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig }
+
+  class VadConfig < OpenAI::Internal::Type::BaseModel
+    # @!attribute type
+    #   Must be set to `server_vad` to enable manual chunking using server side VAD.
+    #
+    #   @return [Symbol, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type]
+    required :type,
+             enum: -> {
+               OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type
+             }
+
+    # @!attribute prefix_padding_ms
+    #   Amount of audio to include before the VAD detected speech (in milliseconds).
+    #
+    #   @return [Integer, nil]
+    optional :prefix_padding_ms, Integer
+
+    # @!attribute silence_duration_ms
+    #   Duration of silence to detect speech stop (in milliseconds). With shorter values
+    #   the model will respond more quickly, but may jump in on short pauses from the
+    #   user.
+    #
+    #   @return [Integer, nil]
+    optional :silence_duration_ms, Integer
+
+    # @!attribute threshold
+    #   Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher
+    #   threshold will require louder audio to activate the model, and thus might
+    #   perform better in noisy environments.
+    #
+    #   @return [Float, nil]
+    optional :threshold, Float
+
+    # @!method initialize(type:, prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil)
+    #   Some parameter documentations has been truncated, see
+    #   {OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig} for more
+    #   details.
+    #
+    #   @param type [Symbol, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD.
+    #
+    #   @param prefix_padding_ms [Integer] Amount of audio to include before the VAD detected speech (in
+    #
+    #   @param silence_duration_ms [Integer] Duration of silence to detect speech stop (in milliseconds).
+    #
+    #   @param threshold [Float] Sensitivity threshold (0.0 to 1.0) for voice activity detection. A
+
+    # Must be set to `server_vad` to enable manual chunking using server side VAD.
+    #
+    # @see OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type
+    module Type
+      extend OpenAI::Internal::Type::Enum
+
+      SERVER_VAD = :server_vad
+
+      # @!method self.values
+      #   @return [Array<Symbol>]
+    end
+  end
+
+  # @!method self.variants
+  #   @return [Array(Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)]
+
+  define_sorbet_constant!(:Variants) do
+    T.type_alias do
+      T.any(Symbol, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)
+    end
+  end
+end
+
 module TimestampGranularity
   extend OpenAI::Internal::Type::Enum
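The new parameter is a union: either the bare symbol :auto or a server_vad configuration. A minimal usage sketch against this surface (OpenAI::Client and audio.transcriptions.create exist in this SDK; the file name and tuning values below are hypothetical, and the hash form relies on the SDK's usual hash-to-model coercion):

require "openai"
require "pathname"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

# Let the server pick chunk boundaries: loudness normalization + VAD.
auto = client.audio.transcriptions.create(
  file: Pathname("meeting.mp3"), # hypothetical local file
  model: "gpt-4o-transcribe",
  chunking_strategy: :auto
)

# Tune VAD manually with a server_vad object; the hash coerces into
# ChunkingStrategy::VadConfig.
tuned = client.audio.transcriptions.create(
  file: Pathname("meeting.mp3"),
  model: "gpt-4o-transcribe",
  chunking_strategy: {
    type: :server_vad,
    prefix_padding_ms: 300,   # audio kept before detected speech
    silence_duration_ms: 500, # silence that closes a chunk
    threshold: 0.6            # higher => louder audio needed to trigger
  }
)

puts auto.text

Leaving chunking_strategy unset (or nil) keeps the old behavior: the audio is transcribed as a single block.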

lib/openai/models/beta/thread_create_and_run_params.rb

Lines changed: 3 additions & 50 deletions
@@ -159,10 +159,8 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
   #   Controls for how a thread will be truncated prior to the run. Use this to
   #   control the intial context window of the run.
   #
-  #   @return [OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil]
-  optional :truncation_strategy,
-           -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy },
-           nil?: true
+  #   @return [OpenAI::Beta::TruncationObject, nil]
+  optional :truncation_strategy, -> { OpenAI::Beta::TruncationObject }, nil?: true
 
   # @!method initialize(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
   #   Some parameter documentations has been truncated, see
@@ -196,7 +194,7 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
   #
   #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
   #
-  #   @param truncation_strategy [OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
+  #   @param truncation_strategy [OpenAI::Beta::TruncationObject, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
   #
   #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
@@ -708,51 +706,6 @@ class FileSearch < OpenAI::Internal::Type::BaseModel
   #   @param vector_store_ids [Array<String>] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
   end
 end
-
-class TruncationStrategy < OpenAI::Internal::Type::BaseModel
-  # @!attribute type
-  #   The truncation strategy to use for the thread. The default is `auto`. If set to
-  #   `last_messages`, the thread will be truncated to the n most recent messages in
-  #   the thread. When set to `auto`, messages in the middle of the thread will be
-  #   dropped to fit the context length of the model, `max_prompt_tokens`.
-  #
-  #   @return [Symbol, OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type]
-  required :type, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type }
-
-  # @!attribute last_messages
-  #   The number of most recent messages from the thread when constructing the context
-  #   for the run.
-  #
-  #   @return [Integer, nil]
-  optional :last_messages, Integer, nil?: true
-
-  # @!method initialize(type:, last_messages: nil)
-  #   Some parameter documentations has been truncated, see
-  #   {OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy} for more details.
-  #
-  #   Controls for how a thread will be truncated prior to the run. Use this to
-  #   control the intial context window of the run.
-  #
-  #   @param type [Symbol, OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
-  #
-  #   @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
-
-  # The truncation strategy to use for the thread. The default is `auto`. If set to
-  # `last_messages`, the thread will be truncated to the n most recent messages in
-  # the thread. When set to `auto`, messages in the middle of the thread will be
-  # dropped to fit the context length of the model, `max_prompt_tokens`.
-  #
-  # @see OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy#type
-  module Type
-    extend OpenAI::Internal::Type::Enum
-
-    AUTO = :auto
-    LAST_MESSAGES = :last_messages
-
-    # @!method self.values
-    #   @return [Array<Symbol>]
-  end
-end
 end
 end
 end
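Because truncation_strategy now resolves to the shared OpenAI::Beta::TruncationObject (required by the new openai/models/beta/truncation_object above), callers pass the same shape wherever the parameter appears. A rough sketch, assuming the beta threads resource exposes create_and_run as the params class name suggests (the assistant ID is a placeholder):

require "openai"

client = OpenAI::Client.new

# Keep only the 10 most recent messages when building the run's context;
# the hash coerces into OpenAI::Beta::TruncationObject.
run = client.beta.threads.create_and_run(
  assistant_id: "asst_placeholder",
  truncation_strategy: { type: :last_messages, last_messages: 10 }
)

# Equivalent, constructing the shared model explicitly:
run = client.beta.threads.create_and_run(
  assistant_id: "asst_placeholder",
  truncation_strategy: OpenAI::Beta::TruncationObject.new(type: :last_messages, last_messages: 10)
)

Collapsing the two identical nested TruncationStrategy classes into this one shared model accounts for most of the deletions here and in run.rb below.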

lib/openai/models/beta/threads/run.rb

Lines changed: 3 additions & 49 deletions
@@ -197,8 +197,8 @@ class Run < OpenAI::Internal::Type::BaseModel
   #   Controls for how a thread will be truncated prior to the run. Use this to
   #   control the intial context window of the run.
   #
-  #   @return [OpenAI::Beta::Threads::Run::TruncationStrategy, nil]
-  required :truncation_strategy, -> { OpenAI::Beta::Threads::Run::TruncationStrategy }, nil?: true
+  #   @return [OpenAI::Beta::TruncationObject, nil]
+  required :truncation_strategy, -> { OpenAI::Beta::TruncationObject }, nil?: true
 
   # @!attribute usage
   #   Usage statistics related to the run. This value will be `null` if the run is not
@@ -270,7 +270,7 @@ class Run < OpenAI::Internal::Type::BaseModel
   #
   #   @param tools [Array<OpenAI::Beta::CodeInterpreterTool, OpenAI::Beta::FileSearchTool, OpenAI::Beta::FunctionTool>] The list of tools that the [assistant](https://platform.openai.com/docs/api-refe
   #
-  #   @param truncation_strategy [OpenAI::Beta::Threads::Run::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
+  #   @param truncation_strategy [OpenAI::Beta::TruncationObject, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
   #
   #   @param usage [OpenAI::Beta::Threads::Run::Usage, nil] Usage statistics related to the run. This value will be `null` if the run is not
   #
@@ -392,52 +392,6 @@ class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel
   end
 end
 
-# @see OpenAI::Beta::Threads::Run#truncation_strategy
-class TruncationStrategy < OpenAI::Internal::Type::BaseModel
-  # @!attribute type
-  #   The truncation strategy to use for the thread. The default is `auto`. If set to
-  #   `last_messages`, the thread will be truncated to the n most recent messages in
-  #   the thread. When set to `auto`, messages in the middle of the thread will be
-  #   dropped to fit the context length of the model, `max_prompt_tokens`.
-  #
-  #   @return [Symbol, OpenAI::Beta::Threads::Run::TruncationStrategy::Type]
-  required :type, enum: -> { OpenAI::Beta::Threads::Run::TruncationStrategy::Type }
-
-  # @!attribute last_messages
-  #   The number of most recent messages from the thread when constructing the context
-  #   for the run.
-  #
-  #   @return [Integer, nil]
-  optional :last_messages, Integer, nil?: true
-
-  # @!method initialize(type:, last_messages: nil)
-  #   Some parameter documentations has been truncated, see
-  #   {OpenAI::Beta::Threads::Run::TruncationStrategy} for more details.
-  #
-  #   Controls for how a thread will be truncated prior to the run. Use this to
-  #   control the intial context window of the run.
-  #
-  #   @param type [Symbol, OpenAI::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
-  #
-  #   @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
-
-  # The truncation strategy to use for the thread. The default is `auto`. If set to
-  # `last_messages`, the thread will be truncated to the n most recent messages in
-  # the thread. When set to `auto`, messages in the middle of the thread will be
-  # dropped to fit the context length of the model, `max_prompt_tokens`.
-  #
-  # @see OpenAI::Beta::Threads::Run::TruncationStrategy#type
-  module Type
-    extend OpenAI::Internal::Type::Enum
-
-    AUTO = :auto
-    LAST_MESSAGES = :last_messages
-
-    # @!method self.values
-    #   @return [Array<Symbol>]
-  end
-end
-
 # @see OpenAI::Beta::Threads::Run#usage
 class Usage < OpenAI::Internal::Type::BaseModel
   # @!attribute completion_tokens
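The read side returns the same shared type: Run#truncation_strategy is now typed as OpenAI::Beta::TruncationObject rather than the removed nested class. A brief sketch continuing the example above (IDs are placeholders; the runs.retrieve signature is assumed from the SDK's usual resource layout):

run = client.beta.threads.runs.retrieve("run_placeholder", thread_id: "thread_placeholder")

strategy = run.truncation_strategy # OpenAI::Beta::TruncationObject or nil
puts strategy&.type                # :auto or :last_messages
puts strategy&.last_messages       # Integer, or nil under :auto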
