Commit ab5e94a

fix: Handle nil response in OpenAI LLM streaming (#953)
When using the `chat` method of `Langchain::LLM::OpenAI` with a block for streaming responses, the underlying `ruby-openai` client returns `nil` after the stream completes. The `with_api_error_handling` method previously attempted to call `.empty?` on this `nil` response, resulting in a `NoMethodError`.

This commit fixes the issue by adding a `response.nil? ||` check before calling `.empty?` in `with_api_error_handling`, which prevents the error when the client yields control back after finishing the stream.

It also adds a new RSpec test case specifically for the streaming scenario. The test mocks the client to return `nil` after streaming and verifies that no error is raised and that the final response object is correctly assembled from the collected chunks.
1 parent 1f5b797 commit ab5e94a
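
To illustrate the call path that triggered the bug, here is a minimal streaming sketch. The setup (the ENV["OPENAI_API_KEY"] lookup and the joke prompt) is illustrative and not taken from this commit; the shape of the chunk yielded to the block follows the spec shown further down.

require "langchain"

# Illustrative setup; the API key source is an assumption for this example.
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# Passing a block streams the response: each processed delta chunk is yielded
# to the block as it arrives. Once the stream finishes, the ruby-openai client
# returns nil, which previously crashed with_api_error_handling with a
# NoMethodError on nil.empty?.
response = llm.chat(messages: [{role: "user", content: "Tell me a joke"}]) do |chunk|
  print chunk.dig("delta", "content")
end

# With the fix, the final response object is assembled from the collected chunks.
puts
puts response.chat_completion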

2 files changed: +48 −1 lines

lib/langchain/llm/openai.rb (1 addition, 1 deletion)

@@ -173,7 +173,7 @@ def reset_response_chunks
 
     def with_api_error_handling
       response = yield
-      return if response.empty?
+      return if response.nil? || response.empty?
 
       raise Langchain::LLM::ApiError.new "OpenAI API error: #{response.dig("error", "message")}" if response&.dig("error")
 

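For context, the full helper with the fix applied might read roughly as follows. This is a sketch reconstructed from the hunk above; the trailing return of `response` is an assumption about the rest of the method, which the diff does not show.

def with_api_error_handling
  response = yield
  # ruby-openai returns nil once a streamed response has completed, so bail out
  # before calling .empty? on it.
  return if response.nil? || response.empty?

  raise Langchain::LLM::ApiError.new "OpenAI API error: #{response.dig("error", "message")}" if response&.dig("error")

  response # assumed: the parsed response is passed back to the caller otherwise
end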
spec/langchain/llm/openai_spec.rb (47 additions, 0 deletions)

@@ -619,6 +619,53 @@
       end
     end
 
+    context "when streaming with a block" do
+      let(:messages) { [{role: "user", content: "Tell me a joke"}] }
+      let(:stream_chunks) do
+        now = Time.now.to_i # Use a single timestamp for simplicity in the mock
+        model_name = "gpt-4o-mini" # Define model name once
+        chunk_id = "chatcmpl-stream-test" # Use a consistent ID
+
+        [
+          {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {"role" => "assistant"}}]},
+          {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {"content" => "Why did the chicken cross the road?"}}]},
+          {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {}, "finish_reason" => "stop"}]},
+          {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "usage" => {"prompt_tokens" => 5, "completion_tokens" => 10, "total_tokens" => 15}}
+        ]
+      end
+      let(:collected_yielded_chunks) { [] }
+      let(:streaming_block) { proc { |chunk| collected_yielded_chunks << chunk } }
+      let(:expected_completion) { "Why did the chicken cross the road?" }
+
+      before do
+        allow(subject.client).to receive(:chat) do |parameters:|
+          expect(parameters[:stream]).to be_a(Proc)
+          expect(parameters[:stream_options]).to eq({include_usage: true})
+          stream_chunks.each { |chunk| parameters[:stream].call(chunk, chunk.to_json.bytesize) }
+          nil # Simulate nil return after streaming
+        end
+      end
+
+      it "does not raise NoMethodError and returns correctly assembled response" do
+        expect {
+          response = subject.chat(messages: messages, &streaming_block)
+          expect(response).to be_a(Langchain::LLM::OpenAIResponse)
+          expect(response.chat_completion).to eq(expected_completion)
+          expect(response.role).to eq("assistant")
+          expect(response.prompt_tokens).to eq(5)
+          expect(response.completion_tokens).to eq(10)
+          expect(response.total_tokens).to eq(15)
+        }.not_to raise_error
+      end
+
+      it "yields the processed delta chunks to the block" do
+        subject.chat(messages: messages, &streaming_block)
+        expected_yielded_chunks = stream_chunks.map { |c| c.dig("choices", 0) || {} }
+        expect(collected_yielded_chunks).to eq(expected_yielded_chunks)
+        expect(collected_yielded_chunks.map { |c| c.dig("delta", "content") }.compact.join).to eq(expected_completion)
+      end
+    end
+
     context "with streaming" do
       let(:streamed_response_chunk) do
         {
