
Commit d212d3f

feat(api): manual updates
1 parent 470a660 commit d212d3f

20 files changed: +504 / -62 lines

.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml
-openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0
-config_hash: e822d0c9082c8b312264403949243179
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml
+openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312
+config_hash: 9606bb315a193bfd8da0459040143242

lib/openai/models/chat/chat_completion_store_message.rb

Lines changed: 32 additions & 1 deletion
@@ -10,10 +10,41 @@ class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage
         #   @return [String]
         required :id, String
 
-        # @!method initialize(id:)
+        # @!attribute content_parts
+        #   If a content parts array was provided, this is an array of `text` and
+        #   `image_url` parts. Otherwise, null.
+        #
+        #   @return [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil]
+        optional :content_parts,
+                 -> {
+                   OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionStoreMessage::ContentPart]
+                 },
+                 nil?: true
+
+        # @!method initialize(id:, content_parts: nil)
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Chat::ChatCompletionStoreMessage} for more details.
+        #
         #   A chat completion message generated by the model.
         #
         #   @param id [String] The identifier of the chat message.
+        #
+        #   @param content_parts [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil] If a content parts array was provided, this is an array of `text` and `image_url
+
+        # Learn about
+        # [text inputs](https://platform.openai.com/docs/guides/text-generation).
+        module ContentPart
+          extend OpenAI::Internal::Type::Union
+
+          # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
+          variant -> { OpenAI::Chat::ChatCompletionContentPartText }
+
+          # Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+          variant -> { OpenAI::Chat::ChatCompletionContentPartImage }
+
+          # @!method self.variants
+          #   @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage)]
+        end
       end
     end
   end
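
For context, a minimal Ruby sketch of how the new `content_parts` field might be consumed. The completion ID is a placeholder, and the stored-message listing call (`client.chat.completions.messages.list`) and client setup are assumptions for illustration, not part of this commit.

require "openai"

client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

# "chatcmpl_abc123" is a placeholder ID for a completion created with `store: true`.
client.chat.completions.messages.list("chatcmpl_abc123").each do |message|
  # `content_parts` is nil unless the original request supplied an array of parts.
  next if message.content_parts.nil?

  message.content_parts.each do |part|
    case part
    when OpenAI::Chat::ChatCompletionContentPartText
      puts "text part: #{part.text}"
    when OpenAI::Chat::ChatCompletionContentPartImage
      puts "image part: #{part.image_url.url}"
    end
  end
end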

lib/openai/models/chat/completion_create_params.rb

Lines changed: 31 additions & 5 deletions
@@ -182,6 +182,14 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         #   @return [Float, nil]
         optional :presence_penalty, Float, nil?: true
 
+        # @!attribute prompt_cache_key
+        #   Used by OpenAI to cache responses for similar requests to optimize your cache
+        #   hit rates. Replaces the `user` field.
+        #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        #
+        #   @return [String, nil]
+        optional :prompt_cache_key, String
+
         # @!attribute reasoning_effort
         #   **o-series models only**
         #
@@ -208,6 +216,16 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject, nil]
         optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat }
 
+        # @!attribute safety_identifier
+        #   A stable identifier used to help detect users of your application that may be
+        #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+        #   identifies each user. We recommend hashing their username or email address, in
+        #   order to avoid sending us any identifying information.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        #
+        #   @return [String, nil]
+        optional :safety_identifier, String
+
         # @!attribute seed
         #   This feature is in Beta. If specified, our system will make a best effort to
         #   sample deterministically, such that repeated requests with the same `seed` and
@@ -320,9 +338,13 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         optional :top_p, Float, nil?: true
 
         # @!attribute user
-        #   A stable identifier for your end-users. Used to boost cache hit rates by better
-        #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+        #   @deprecated
+        #
+        #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+        #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+        #   similar requests and to help OpenAI detect and prevent abuse.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         #
         #   @return [String, nil]
         optional :user, String
@@ -335,7 +357,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
         optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
 
-        # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
+        # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::Chat::CompletionCreateParams} for more details.
         #
@@ -371,10 +393,14 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         #
         #   @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
         #
+        #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+        #
         #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
         #
         #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
         #
+        #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+        #
         #   @param seed [Integer, nil] This feature is in Beta.
         #
         #   @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -395,7 +421,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
         #
         #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
         #
-        #   @param user [String] A stable identifier for your end-users.
+        #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
         #
         #   @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
         #
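
A hedged usage sketch for the two new Chat Completions parameters; the model name, message, and cache key below are placeholders, not values from this commit. `prompt_cache_key` buckets similar requests for prompt caching, and `safety_identifier` carries a hashed, non-identifying user ID in place of the deprecated `user` field.

require "openai"
require "digest"

client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

# Hash the account identifier so no raw PII is sent with the request.
safety_id = Digest::SHA256.hexdigest("user-42@example.com")

completion = client.chat.completions.create(
  model: "gpt-4.1", # placeholder model name
  messages: [{role: :user, content: "Summarize our refund policy."}],
  prompt_cache_key: "support-bot-refunds", # groups similar prompts to improve cache hit rates
  safety_identifier: safety_id             # replaces the deprecated `user` field
)

puts completion.choices.first.message.content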

lib/openai/models/responses/response.rb

Lines changed: 30 additions & 4 deletions
@@ -171,6 +171,14 @@ class Response < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::Responses::ResponsePrompt, nil]
         optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
 
+        # @!attribute prompt_cache_key
+        #   Used by OpenAI to cache responses for similar requests to optimize your cache
+        #   hit rates. Replaces the `user` field.
+        #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        #
+        #   @return [String, nil]
+        optional :prompt_cache_key, String
+
         # @!attribute reasoning
         #   **o-series models only**
         #
@@ -180,6 +188,16 @@ class Response < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::Reasoning, nil]
         optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
 
+        # @!attribute safety_identifier
+        #   A stable identifier used to help detect users of your application that may be
+        #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+        #   identifies each user. We recommend hashing their username or email address, in
+        #   order to avoid sending us any identifying information.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        #
+        #   @return [String, nil]
+        optional :safety_identifier, String
+
         # @!attribute service_tier
         #   Specifies the processing type used for serving the request.
         #
@@ -246,9 +264,13 @@ class Response < OpenAI::Internal::Type::BaseModel
         optional :usage, -> { OpenAI::Responses::ResponseUsage }
 
         # @!attribute user
-        #   A stable identifier for your end-users. Used to boost cache hit rates by better
-        #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+        #   @deprecated
+        #
+        #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+        #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+        #   similar requests and to help OpenAI detect and prevent abuse.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         #
         #   @return [String, nil]
         optional :user, String
@@ -313,8 +335,12 @@ def output_text
         #
         #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
         #
+        #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+        #
         #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
         #
+        #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+        #
         #   @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
         #
         #   @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
@@ -327,7 +353,7 @@ def output_text
         #
         #   @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
         #
-        #   @param user [String] A stable identifier for your end-users.
+        #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
         #
         #   @param object [Symbol, :response] The object type of this resource - always set to `response`.
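
On the read side, a rough sketch (the response ID is a placeholder): both new fields are echoed back as plain attributes on a retrieved `Response` and may be nil if they were not set on the original request.

require "openai"

client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

# "resp_abc123" is a placeholder ID for a previously created response.
response = client.responses.retrieve("resp_abc123")

puts response.prompt_cache_key   # nil unless it was sent on the create request
puts response.safety_identifier  # nil unless it was sent on the create request
puts response.output_text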

lib/openai/models/responses/response_create_params.rb

Lines changed: 31 additions & 5 deletions
@@ -123,6 +123,14 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::Responses::ResponsePrompt, nil]
         optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
 
+        # @!attribute prompt_cache_key
+        #   Used by OpenAI to cache responses for similar requests to optimize your cache
+        #   hit rates. Replaces the `user` field.
+        #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        #
+        #   @return [String, nil]
+        optional :prompt_cache_key, String
+
         # @!attribute reasoning
         #   **o-series models only**
         #
@@ -132,6 +140,16 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
         #   @return [OpenAI::Models::Reasoning, nil]
         optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
 
+        # @!attribute safety_identifier
+        #   A stable identifier used to help detect users of your application that may be
+        #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+        #   identifies each user. We recommend hashing their username or email address, in
+        #   order to avoid sending us any identifying information.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        #
+        #   @return [String, nil]
+        optional :safety_identifier, String
+
         # @!attribute service_tier
         #   Specifies the processing type used for serving the request.
         #
@@ -242,14 +260,18 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
         optional :truncation, enum: -> { OpenAI::Responses::ResponseCreateParams::Truncation }, nil?: true
 
         # @!attribute user
-        #   A stable identifier for your end-users. Used to boost cache hit rates by better
-        #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+        #   @deprecated
+        #
+        #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+        #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+        #   similar requests and to help OpenAI detect and prevent abuse.
+        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         #
         #   @return [String, nil]
         optional :user, String
 
-        # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+        # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::Responses::ResponseCreateParams} for more details.
         #
@@ -275,8 +297,12 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
         #
         #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
         #
+        #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+        #
         #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
         #
+        #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+        #
         #   @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
         #
         #   @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -295,7 +321,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
         #
         #   @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
         #
-        #   @param user [String] A stable identifier for your end-users.
+        #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
         #
         #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
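
And the request-side counterpart for the Responses API, again with a placeholder model, input, and cache key; the two parameters mirror their Chat Completions equivalents.

require "openai"
require "digest"

client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment

response = client.responses.create(
  model: "gpt-4.1", # placeholder model name
  input: "Draft a short status update for the team.",
  prompt_cache_key: "team-status-updates",                           # caching bucket
  safety_identifier: Digest::SHA256.hexdigest("user-42@example.com") # hashed, non-identifying user ID
)

puts response.output_text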
