Skip to content

Commit 60a2061

Browse files
renfredxhms-jpq
authored and committed
feat: responses streaming helpers (#721)
* add responses streaming helpers
* refactor tests
* fix helpers.md
* add param canonization step to stream
* update README streaming example
* support parsed responses in the final response
* misc: PR touch ups

Co-authored-by: dogisgreat <[email protected]>
1 parent c60933d commit 60a2061

File tree

17 files changed

+1829
-82
lines changed

17 files changed

+1829
-82
lines changed

README.md

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -42,16 +42,14 @@ puts(chat_completion)
4242

4343
We provide support for streaming responses using Server-Sent Events (SSE).
4444

45-
**coming soon:** `openai.chat.completions.stream` will soon come with Python SDK-style higher-level streaming responses support.
46-
4745
```ruby
48-
stream = openai.chat.completions.stream_raw(
49-
messages: [{role: "user", content: "Say this is a test"}],
46+
stream = openai.responses.stream(
47+
input: "Write a haiku about OpenAI.",
5048
model: :"gpt-4.1"
5149
)
5250

53-
stream.each do |completion|
54-
puts(completion)
51+
stream.each do |event|
52+
puts(event.type)
5553
end
5654
```
5755

examples/responses/streaming_basic.rb

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# typed: strict

# Minimal streaming example: start a response stream and react to the
# typed server-sent events as they arrive.

require_relative "../../lib/openai"

openai = OpenAI::Client.new

# Kick off a streaming response; the returned stream yields event objects.
stream = openai.responses.stream(
  input: "Write a haiku about OpenAI.",
  model: "gpt-4o-2024-08-06"
)

stream.each do |streamed|
  case streamed
  when OpenAI::Streaming::ResponseTextDeltaEvent
    # Incremental text chunk — print without a trailing newline.
    print(streamed.delta)
  when OpenAI::Streaming::ResponseTextDoneEvent
    # The text output is complete; draw a separator.
    puts("\n--------------------------")
  when OpenAI::Streaming::ResponseCompletedEvent
    # Terminal event carrying the finished response object.
    puts("Response completed! (response id: #{streamed.response.id})")
  end
end
Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

require_relative "../../lib/openai"

# This example demonstrates how to resume a streaming response.
#
# It makes two requests: the first starts a background streaming response
# and deliberately stops reading after a few events; the second resumes
# the same response stream from the last-seen sequence number.

client = OpenAI::Client.new

# Request 1: Create a new streaming response with background: true so the
# model keeps generating server-side even after we stop reading the stream.
puts "Creating a new streaming response..."
stream = client.responses.stream(
  model: "o4-mini",
  input: "Tell me a short story about a robot learning to paint.",
  instructions: "You are a creative storyteller.",
  background: true
)

events = []
response_id = ""

stream.each do |event|
  events << event
  puts "Event from initial stream: #{event.type} (seq: #{event.sequence_number})"
  case event

  when OpenAI::Models::Responses::ResponseCreatedEvent
    # The created event carries the response ID we need in order to resume.
    response_id = event.response.id if response_id.empty?
    puts("Captured response ID: #{response_id}")
  end

  # Simulate stopping after a few events (e.g. a client disconnect).
  if events.length >= 5
    puts "Terminating after #{events.length} events"
    break
  end
end

# Close the first stream; the background response continues server-side.
stream.close

puts
puts "Collected #{events.length} events"
puts "Response ID: #{response_id}"
puts "Last event sequence number: #{events.last.sequence_number}.\n"

# Give the background response some time to process more events.
puts "Waiting a moment for the background response to progress...\n"
sleep(2)

# Request 2: Resume the stream using the captured response_id, starting
# after the last sequence number we saw in the first request.
puts "Resuming stream from sequence #{events.last.sequence_number}..."

resumed_stream = client.responses.stream(
  previous_response_id: response_id,
  starting_after: events.last.sequence_number
)

resumed_events = []
resumed_stream.each do |event|
  resumed_events << event
  puts "Event from resumed stream: #{event.type} (seq: #{event.sequence_number})"
  # Stop when we get the completed event or collect enough events.
  if event.is_a?(OpenAI::Models::Responses::ResponseCompletedEvent)
    puts "Response completed!"
    break
  end

  break if resumed_events.length >= 10
end

puts "\nCollected #{resumed_events.length} additional events"

# Show that we properly resumed from where we left off: the first resumed
# sequence number should follow the last one seen on the initial stream.
if resumed_events.any?
  first_resumed_event = resumed_events.first
  last_initial_event = events.last
  puts "First resumed event sequence: #{first_resumed_event.sequence_number}"
  puts "Should be greater than last initial event: #{last_initial_event.sequence_number}"
end
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

require_relative "../../lib/openai"

# Defining structured output models.
# A single reasoning step in the worked solution.
class Step < OpenAI::BaseModel
  required :explanation, String
  required :output, String
end

# The full structured answer: ordered steps plus a final result.
class MathResponse < OpenAI::BaseModel
  required :steps, OpenAI::ArrayOf[Step]
  required :final_answer, String
end

client = OpenAI::Client.new

# Passing the model class as `text:` asks the API for structured output
# conforming to MathResponse's schema.
stream = client.responses.stream(
  input: "solve 8x + 31 = 2",
  model: "gpt-4o-2024-08-06",
  text: MathResponse
)

stream.each do |event|
  case event
  when OpenAI::Streaming::ResponseTextDeltaEvent
    # Raw JSON text as it streams in.
    print(event.delta)
  when OpenAI::Streaming::ResponseTextDoneEvent
    puts
    puts("--- Parsed object ---")
    # The done event exposes the fully parsed MathResponse instance.
    pp(event.parsed)
  end
end

# Accumulated final response, available after the stream is exhausted.
response = stream.get_final_response

puts
puts("----- parsed outputs from final response -----")
response
  .output
  .flat_map { _1.content }
  .each do |content|
    # parsed is an instance of `MathResponse`
    pp(content.parsed)
  end

examples/responses/streaming_text.rb

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# typed: strong

# Example: consume only the text portion of a streaming response.

require_relative "../../lib/openai"

openai = OpenAI::Client.new

# Start streaming; `stream.text` yields just the text deltas.
stream = openai.responses.stream(
  input: "Write a haiku about OpenAI.",
  model: "gpt-4o-2024-08-06"
)

stream.text.each { |chunk| print(chunk) }

puts

# Get all of the text that was streamed with .get_output_text
puts "Character count: #{stream.get_output_text.length}"

examples/responses/streaming_tools.rb

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# typed: true

require_relative "../../lib/openai"

# Schema models describing the function-call arguments the model may emit.

# A column reference used as a comparison value.
class DynamicValue < OpenAI::BaseModel
  required :column_name, String
end

# One WHERE-clause condition: column, comparison operator, and value.
class Condition < OpenAI::BaseModel
  required :column, String
  required :operator, OpenAI::EnumOf[:eq, :gt, :lt, :le, :ge, :ne]
  required :value, OpenAI::UnionOf[String, Integer, DynamicValue]
end

# you can assign `OpenAI::{...}` schema specifiers to a constant
Columns = OpenAI::EnumOf[
  :id,
  :status,
  :expected_delivery_date,
  :delivered_at,
  :shipped_at,
  :ordered_at,
  :canceled_at
]

# The tool's argument schema: a simple query description.
class Query < OpenAI::BaseModel
  required :table_name, OpenAI::EnumOf[:orders, :customers, :products]
  required :columns, OpenAI::ArrayOf[Columns]
  required :conditions, OpenAI::ArrayOf[Condition]
  required :order_by, OpenAI::EnumOf[:asc, :desc]
end

client = OpenAI::Client.new

# Passing the model class in `tools:` registers it as a callable tool whose
# arguments must conform to Query's schema.
stream = client.responses.stream(
  model: "gpt-4o-2024-08-06",
  input: "look up all my orders in november of last year that were fulfilled but not delivered on time",
  tools: [Query]
)

stream.each do |event|
  case event
  when OpenAI::Streaming::ResponseFunctionCallArgumentsDeltaEvent
    # Incremental tool-call argument JSON, plus the accumulated snapshot.
    puts("delta: #{event.delta}")
    puts("snapshot: #{event.snapshot}")
  end
end

# Accumulated final response, available once the stream is exhausted.
response = stream.get_final_response

puts
puts("----- parsed outputs from final response -----")
response
  .output
  .each do |output|
    case output
    when OpenAI::Models::Responses::ResponseFunctionToolCall
      # parsed is an instance of `Query`
      pp(output.parsed)
    end
  end

0 commit comments

Comments
 (0)