Skip to content

Commit 9071fa2

Browse files
committed
feat: added some usage examples
1 parent fc96721 commit 9071fa2

File tree

7 files changed

+269
-4
lines changed

7 files changed

+269
-4
lines changed
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
# CI workflow: triggers release-please daily and on pushes to main,
# then pings RubyDocs to re-index the published gem's documentation.
name: Create releases
on:
  schedule:
    - cron: '0 5 * * *' # every day at 5am UTC
  push:
    branches:
      - main

jobs:
  release:
    name: release
    # Guard so forks and non-main refs never attempt to publish.
    if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-ruby'
    runs-on: ubuntu-latest
    # Uses the "publish" environment, which holds the deployment secrets.
    environment: publish

    steps:
      - uses: actions/checkout@v4

      # Delegates release creation to Stainless's release-please trigger;
      # its outputs report whether any release was actually cut.
      - uses: stainless-api/trigger-release-please@v1
        id: release
        with:
          repo: ${{ github.event.repository.full_name }}
          stainless-api-key: ${{ secrets.STAINLESS_API_KEY }}

      # Only refresh RubyDocs when a release was created in this run.
      - name: Update RubyDocs
        if: ${{ steps.release.outputs.releases_created }}
        run: |
          curl -i -H "Content-Type: application/json" -X POST -d '{"repository":{"url":"https://github.com/openai/openai-ruby"}}' https://www.rubydoc.info/checkout

examples/.keep

Lines changed: 0 additions & 4 deletions
This file was deleted.

examples/advanced_streaming.rb

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
#!/usr/bin/env -S -- ruby
# frozen_string_literal: true
# typed: strong

require_relative "../lib/openai"

# The client reads its API key from the `OPENAI_API_KEY` environment variable.
client = OpenAI::Client.new

begin
  pp("----- streams are enumerable -----")

  completion_stream = client.completions.create_streaming(
    model: :"gpt-3.5-turbo-instruct",
    prompt: "1,2,3,",
    max_tokens: 5,
    temperature: 0.0
  )

  # A stream is an `https://rubyapi.org/3.1/o/enumerable`, so it can be worked
  # with almost as if it were an array. Note that invoking any `Enumerable`
  # method blocks until the whole stream has been consumed, and also cleans
  # the stream up afterwards.
  text_completions = completion_stream.select { |chunk| chunk.object == :text_completion }
  all_choices = text_completions.flat_map { |chunk| chunk.choices }

  pp(all_choices)

  # A fully consumed stream becomes "empty" and yields nothing further.
  pp("this will print an empty array")
  pp(completion_stream.to_a)
end

begin
  pp("----- streams can be lazy -----")

  completion_stream = client.completions.create_streaming(
    model: :"gpt-3.5-turbo-instruct",
    prompt: "1,2,3,",
    max_tokens: 5,
    temperature: 0.0
  )

  # `#lazy` wraps the stream in a deferred `https://rubyapi.org/3.1/o/enumerator/lazy`.
  # Each subsequent call to an `Enumerable`-returning method does not consume
  # the stream; it just builds up another transformed stream. (See link above.)
  lazy_choices =
    completion_stream
      .lazy
      .select { |chunk| chunk.object == :text_completion }
      .flat_map { |chunk| chunk.choices }

  # This prints the suspended intermediary stream, not its elements.
  pp(lazy_choices)
  # Caution: if the intermediary stream is never used, `completion_stream.close`
  # must be called to release the underlying connection.

  # A call that does not return another `Enumerable` consumes the intermediary
  # stream and performs the cleanup.
  lazy_choices.each { |choice| pp(choice) }

  # The stream was drained above, so this returns an empty array.
  pp(lazy_choices.to_a)
end

examples/demo.rb

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
#!/usr/bin/env -S -- ruby
# frozen_string_literal: true
# typed: strong

require_relative "../lib/openai"

# The client reads its API key from the `OPENAI_API_KEY` environment variable.
client = OpenAI::Client.new

begin
  # Non-streaming: the complete response is returned in one shot.
  pp("----- standard request -----")

  response = client.chat.completions.create(
    model: "gpt-4",
    messages: [{role: "user", content: "Say this is a test"}]
  )

  pp(response.choices.first&.message&.content)
end

begin
  # Streaming: chunks are yielded incrementally as they arrive.
  pp("----- streaming request -----")

  chunk_stream = client.chat.completions.create_streaming(
    model: "gpt-4",
    messages: [
      {role: "user", content: "How do I output all files in a directory using Python?"}
    ]
  )

  chunk_stream.each do |chunk|
    # Some chunks carry no choices at all; skip those.
    next if chunk.choices.to_a.empty?

    pp(chunk.choices.first&.delta&.content)
  end
end

examples/demo_sorbet.rb

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
#!/usr/bin/env -S -- ruby
# frozen_string_literal: true
# typed: strong

require_relative "../lib/openai"

# The client reads its API key from the `OPENAI_API_KEY` environment variable.
client = OpenAI::Client.new

begin
  pp("----- named arguments in sorbet -----")

  # Under Sorbet the method signature declares named arguments,
  # so the call below type checks as written.
  response = client.chat.completions.create(
    messages: [{role: :user, content: "Say this is a test"}],
    model: "gpt-4"
  )

  pp(response.choices.first&.message&.content)
end

begin
  pp("----- trying to use params class in sorbet -----")

  params = OpenAI::Models::Chat::CompletionCreateParams.new(
    messages: [{role: :user, content: "Say this is a test again"}],
    model: "gpt-4"
  )

  # With the Sorbet LSP enabled, uncommenting the two lines below shows a red
  # squiggle under `params` — a quirk of the Sorbet type system.
  #
  # The code would nonetheless run correctly if uncommented.

  # completion = client.chat.completions.create(params)
  # pp(completion.choices.first&.message&.content)
end

begin
  pp("----- using params class correctly in sorbet -----")

  params = OpenAI::Models::Chat::CompletionCreateParams.new(
    messages: [{role: :user, content: "Say this is a test yet again"}],
    model: "gpt-4"
  )

  # The `**` splat forwards a params object into any compatible method
  # that declares named arguments.
  response = client.chat.completions.create(**params)

  pp(response.choices.first&.message&.content)
end

examples/picture.rb

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
#!/usr/bin/env -S -- ruby
# frozen_string_literal: true
# typed: strong

require_relative "../lib/openai"

# The client reads its API key from the `OPENAI_API_KEY` environment variable.
client = OpenAI::Client.new

# Ask the API to generate an image from a text prompt.
image_prompt = "An astronaut lounging in a tropical resort in space, pixel art"
response = client.images.generate(prompt: image_prompt)

# The response contains a URL link to the generated image.
pp(response)

examples/streaming.rb

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
#!/usr/bin/env -S -- ruby
# frozen_string_literal: true
# typed: strong

require_relative "../lib/openai"

# The client reads its API key from the `OPENAI_API_KEY` environment variable.
client = OpenAI::Client.new

begin
  pp("----- streaming 101 -----")

  completion_stream = client.completions.create_streaming(
    model: :"gpt-3.5-turbo-instruct",
    prompt: "1,2,3,",
    max_tokens: 5,
    temperature: 0.0
  )

  # `#each` always cleans up the stream, even when the block raises an error.
  completion_stream.each do |chunk|
    pp(chunk)

    # Exiting the `#each` loop early also cleans up the stream for you.
    if chunk.choices.size > 2
      pp("too many choices")
      break
    end
  end

  # An exhausted stream produces no more chunks.
  completion_stream.each { pp("This will never run") }
end

begin
  pp("----- manual closing of stream -----")

  completion_stream = client.completions.create_streaming(
    model: :"gpt-3.5-turbo-instruct",
    prompt: "1,2,3,",
    max_tokens: 5,
    temperature: 0.0
  )

  # A stream that is never consumed must be closed by hand
  # to release the underlying connection.
  completion_stream.close
end

0 commit comments

Comments
 (0)