|
#!/usr/bin/env ruby
# frozen_string_literal: true

require_relative "../../lib/openai"

# This example demonstrates how to resume a streaming response:
#   1. Start a *background* streaming response and capture its response ID.
#   2. Stop consuming events part-way through.
#   3. Re-attach to the same response with `starting_after` and read the rest.

client = OpenAI::Client.new

# Request 1: Create a new streaming response with background: true.
# Background responses keep running server-side, which is what makes
# resuming possible after we disconnect.
puts "Creating a new streaming response..."
stream = client.responses.stream(
  model: "o4-mini",
  input: "Tell me a short story about a robot learning to paint.",
  instructions: "You are a creative storyteller.",
  background: true
)

events = []
response_id = ""

begin
  stream.each do |event|
    events << event
    puts "Event from initial stream: #{event.type} (seq: #{event.sequence_number})"

    # The created event carries the response ID we need in order to resume.
    case event
    when OpenAI::Models::Responses::ResponseCreatedEvent
      response_id = event.response.id if response_id.empty?
      puts("Captured response ID: #{response_id}")
    end

    # Simulate stopping after a few events
    if events.length >= 5
      puts "Terminating after #{events.length} events"
      break
    end
  end
ensure
  # Close even if the loop above raises, not only on the happy path.
  stream.close
end

# Without a response ID the resume request below cannot be made, and
# `events.last` would be nil — fail fast with a clear message instead.
abort("Never received a response ID; cannot resume the stream.") if response_id.empty?

puts
puts "Collected #{events.length} events"
puts "Response ID: #{response_id}"
puts "Last event sequence number: #{events.last.sequence_number}.\n"

# Give the background response some time to process more events.
puts "Waiting a moment for the background response to progress...\n"
sleep(2)

# Request 2: Resume the stream using the captured response_id, skipping
# everything we already saw via `starting_after`.
puts "Resuming stream from sequence #{events.last.sequence_number}..."

resumed_stream = client.responses.stream(
  previous_response_id: response_id,
  starting_after: events.last.sequence_number
)

resumed_events = []
begin
  resumed_stream.each do |event|
    resumed_events << event
    puts "Event from resumed stream: #{event.type} (seq: #{event.sequence_number})"

    # Stop when we get the completed event or collect enough events.
    if event.is_a?(OpenAI::Models::Responses::ResponseCompletedEvent)
      puts "Response completed!"
      break
    end

    break if resumed_events.length >= 10
  end
ensure
  # Fix: the resumed stream was previously never closed — release it
  # just like the initial stream.
  resumed_stream.close
end

puts "\nCollected #{resumed_events.length} additional events"

# Show that we properly resumed from where we left off.
if resumed_events.any?
  first_resumed_event = resumed_events.first
  last_initial_event = events.last
  puts "First resumed event sequence: #{first_resumed_event.sequence_number}"
  puts "Should be greater than last initial event: #{last_initial_event.sequence_number}"
end
0 commit comments