
Commit 7e2e96b

Merge pull request #152 from openai/release-please--branches--main--changes--next
release: 0.5.1
2 parents: 5160a5d + 05b23d7 · commit 7e2e96b

File tree

56 files changed: +473 -279 lines


.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 {
-  ".": "0.5.0"
+  ".": "0.5.1"
 }

.stats.yml

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d4bcffecf0cdadf746faa6708ed1ec81fac451f9b857deabbab26f0a343b9314.yml
-openapi_spec_hash: 7c54a18b4381248bda7cc34c52142615
-config_hash: d23f847b9ebb3f427d0f198035bd3e9f
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2bcc845d8635bf93ddcf9ee723af4d7928248412a417bee5fc10d863a1e13867.yml
+openapi_spec_hash: 865230cb3abeb01bd85de05891af23c4
+config_hash: ed1e6b3c5f93d12b80d31167f55c557c

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.5.1 (2025-06-02)
+
+Full Changelog: [v0.5.0...v0.5.1](https://github.com/openai/openai-ruby/compare/v0.5.0...v0.5.1)
+
+### Bug Fixes
+
+* **api:** Fix evals and code interpreter interfaces ([24a9100](https://github.com/openai/openai-ruby/commit/24a910015e6885fc19a2ad689fe70a148bed5787))
+
 ## 0.5.0 (2025-05-29)
 
 Full Changelog: [v0.4.1...v0.5.0](https://github.com/openai/openai-ruby/compare/v0.4.1...v0.5.0)

Gemfile.lock

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ GIT
 PATH
   remote: .
   specs:
-    openai (0.5.0)
+    openai (0.5.1)
       connection_pool
 
 GEM

README.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->
 
 ```ruby
-gem "openai", "~> 0.5.0"
+gem "openai", "~> 0.5.1"
 ```
 
 <!-- x-release-please-end -->
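Nothing about day-to-day usage changes with the version bump; for reference, a minimal sketch of the client interface the same README documents (the model name is illustrative):

```ruby
require "openai"

# Reads OPENAI_API_KEY from the environment when no api_key is passed.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat_completion = client.chat.completions.create(
  model: "gpt-4o", # illustrative; any chat-capable model works
  messages: [{role: "user", content: "Say this is a test"}]
)

puts chat_completion.choices.first.message.content
```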

lib/openai/models/audio/transcription_text_delta_event.rb

Lines changed: 3 additions & 3 deletions
@@ -50,8 +50,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
 # @!attribute bytes
 #   The bytes that were used to generate the log probability.
 #
-#   @return [Array<Object>, nil]
-optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+#   @return [Array<Integer>, nil]
+optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
 
 # @!attribute logprob
 #   The log probability of the token.
@@ -65,7 +65,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
 #
 # @param token [String] The token that was used to generate the log probability.
 #
-# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
+# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
 #
 # @param logprob [Float] The log probability of the token.
 end

lib/openai/models/audio/transcription_text_done_event.rb

Lines changed: 3 additions & 3 deletions
@@ -51,8 +51,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
 # @!attribute bytes
 #   The bytes that were used to generate the log probability.
 #
-#   @return [Array<Object>, nil]
-optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+#   @return [Array<Integer>, nil]
+optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
 
 # @!attribute logprob
 #   The log probability of the token.
@@ -66,7 +66,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
 #
 # @param token [String] The token that was used to generate the log probability.
 #
-# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
+# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
 #
 # @param logprob [Float] The log probability of the token.
 end

lib/openai/models/chat/chat_completion.rb

Lines changed: 4 additions & 4 deletions
@@ -46,9 +46,9 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -195,9 +195,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/chat/chat_completion_chunk.rb

Lines changed: 4 additions & 4 deletions
@@ -45,9 +45,9 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -378,9 +378,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/chat/completion_create_params.rb

Lines changed: 4 additions & 4 deletions
@@ -226,9 +226,9 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -553,9 +553,9 @@ module ResponseFormat
 #   utilize scale tier credits until they are exhausted.
 #   - If set to 'auto', and the Project is not Scale tier enabled, the request will
 #     be processed using the default service tier with a lower uptime SLA and no
-#     latency guarentee.
+#     latency guarantee.
 #   - If set to 'default', the request will be processed using the default service
-#     tier with a lower uptime SLA and no latency guarentee.
+#     tier with a lower uptime SLA and no latency guarantee.
 #   - If set to 'flex', the request will be processed with the Flex Processing
 #     service tier.
 #     [Learn more](https://platform.openai.com/docs/guides/flex-processing).
