client = OpenAI::Client.new

# ----- Example 1: resuming a stream from a previous response -----
#
# Creates a background streaming response, deliberately stops consuming it
# early, then resumes the stream from the last seen sequence number using
# `previous_response_id` + `starting_after`.
begin
  puts "----- resuming stream from a previous response -----"

  # Request 1: Create a new streaming response with background=true.
  # Background mode lets the response keep generating server-side even after
  # we stop reading the stream.
  puts "Creating a new streaming response..."
  stream = client.responses.stream(
    model: "o4-mini",
    input: "Tell me a short story about a robot learning to paint.",
    instructions: "You are a creative storyteller.",
    background: true
  )

  events = []
  response_id = ""

  stream.each do |event|
    events << event
    puts "Event from initial stream: #{event.type} (seq: #{event.sequence_number})"
    case event

    when OpenAI::Models::Responses::ResponseCreatedEvent
      # The created event carries the response ID we need in order to resume.
      response_id = event.response.id if response_id.empty?
      puts("Captured response ID: #{response_id}")
    end

    # Simulate stopping after a few events
    if events.length >= 5
      puts "Terminating after #{events.length} events"
      break
    end
  end

  puts "Collected #{events.length} events"
  puts "Response ID: #{response_id}"
  puts "Last event sequence number: #{events.last.sequence_number}.\n"

  # Give the background response some time to process more events.
  puts "Waiting a moment for the background response to progress...\n"
  sleep(3)

  # Request 2: Resume the stream using the captured response_id.
  # `starting_after` skips events we have already seen.
  puts
  puts "Resuming stream from sequence #{events.last.sequence_number}..."

  resumed_stream = client.responses.stream(
    previous_response_id: response_id,
    starting_after: events.last.sequence_number
  )

  resumed_events = []
  resumed_stream.each do |event|
    resumed_events << event
    puts "Event from resumed stream: #{event.type} (seq: #{event.sequence_number})"
    # Stop when we get the completed event or collect enough events.
    if event.is_a?(OpenAI::Models::Responses::ResponseCompletedEvent)
      puts "Response completed!"
      break
    end

    break if resumed_events.length >= 10
  end

  puts "Collected #{resumed_events.length} additional events"

  # Show that we properly resumed from where we left off.
  if resumed_events.any?
    first_resumed_event = resumed_events.first
    last_initial_event = events.last
    puts "First resumed event sequence: #{first_resumed_event.sequence_number}"
    puts "Should be greater than last initial event: #{last_initial_event.sequence_number}"
  end
end
# ----- Example 2: resuming a stream with structured outputs -----
#
# Same resume flow as above, but the response is parsed into typed models.
# NOTE(review): the structured output format must be passed again when
# resuming, or parsed outputs will not be available on the resumed stream.
begin
  puts "\n----- resuming stream with structured outputs -----"

  # One step of the model's worked solution.
  class Step < OpenAI::BaseModel
    required :explanation, String
    required :output, String
  end

  # The full structured answer: an ordered list of steps plus the result.
  class MathResponse < OpenAI::BaseModel
    required :steps, OpenAI::ArrayOf[Step]
    required :final_answer, String
  end

  puts "Creating a background streaming response with structured output..."
  stream = client.responses.stream(
    input: "solve 8x + 31 = 2",
    model: "gpt-4o-2024-08-06",
    text: MathResponse,
    background: true
  )

  events = []
  response_id = ""

  stream.each do |event|
    events << event

    case event
    when OpenAI::Models::Responses::ResponseCreatedEvent
      # Capture the response ID so we can resume later.
      response_id = event.response.id if response_id.empty?
    end

    # Stop consuming early; the background response keeps generating.
    if events.length >= 5
      break
    end
  end

  puts "Waiting for the background response to complete...\n"
  sleep(3)

  puts
  puts "Resuming stream from sequence #{events.last.sequence_number}..."

  resumed_stream = client.responses.stream(
    previous_response_id: response_id,
    starting_after: events.last.sequence_number,
    # NOTE: You must pass the structured output format when resuming to access parsed
    # outputs in the resumed stream.
    text: MathResponse
  )

  resumed_stream.each do |event|
    case event
    when OpenAI::Streaming::ResponseTextDeltaEvent
      print(event.delta)
    when OpenAI::Streaming::ResponseTextDoneEvent
      puts
      puts("--- Parsed object from resumed stream ---")
      pp(event.parsed)
    when OpenAI::Models::Responses::ResponseCompletedEvent
      puts("Response completed.")
      break
    end
  end

  # Fetch the final response and print every parsed content part.
  puts "\nFinal response parsed outputs:"
  response = resumed_stream.get_final_response
  response
    .output
    .flat_map { _1.content }
    .each do |content|
      pp(content.parsed)
    end
end