619 | 619 | end |
620 | 620 | end |
621 | 621 |
| 622 | + context "when streaming with a block" do |
| 623 | + let(:messages) { [{role: "user", content: "Tell me a joke"}] } |
| 624 | + let(:stream_chunks) do |
| 625 | + now = Time.now.to_i # Use a single timestamp for simplicity in the mock |
| 626 | + model_name = "gpt-4o-mini" # Define model name once |
| 627 | + chunk_id = "chatcmpl-stream-test" # Use a consistent ID |
| 628 | + |
| 629 | + [ |
| 630 | + {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {"role" => "assistant"}}]}, |
| 631 | + {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {"content" => "Why did the chicken cross the road?"}}]}, |
| 632 | + {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "choices" => [{"index" => 0, "delta" => {}, "finish_reason" => "stop"}]}, |
| 633 | + {"id" => chunk_id, "object" => "chat.completion.chunk", "created" => now, "model" => model_name, "usage" => {"prompt_tokens" => 5, "completion_tokens" => 10, "total_tokens" => 15}} |
| 634 | + ] |
| 635 | + end |
| 636 | + let(:collected_yielded_chunks) { [] } |
| 637 | + let(:streaming_block) { proc { |chunk| collected_yielded_chunks << chunk } } |
| 638 | + let(:expected_completion) { "Why did the chicken cross the road?" } |
| 639 | + |
| 640 | + before do |
| 641 | + allow(subject.client).to receive(:chat) do |parameters:| |
| 642 | + expect(parameters[:stream]).to be_a(Proc) |
| 643 | + expect(parameters[:stream_options]).to eq({include_usage: true}) |
| 644 | + stream_chunks.each { |chunk| parameters[:stream].call(chunk, chunk.to_json.bytesize) } |
| 645 | + nil # Simulate nil return after streaming |
| 646 | + end |
| 647 | + end |
| 648 | + |
| 649 | + it "does not raise NoMethodError and returns correctly assembled response" do |
| 650 | + expect { |
| 651 | + response = subject.chat(messages: messages, &streaming_block) |
| 652 | + expect(response).to be_a(Langchain::LLM::OpenAIResponse) |
| 653 | + expect(response.chat_completion).to eq(expected_completion) |
| 654 | + expect(response.role).to eq("assistant") |
| 655 | + expect(response.prompt_tokens).to eq(5) |
| 656 | + expect(response.completion_tokens).to eq(10) |
| 657 | + expect(response.total_tokens).to eq(15) |
| 658 | + }.not_to raise_error |
| 659 | + end |
| 660 | + |
| 661 | + it "yields the processed delta chunks to the block" do |
| 662 | + subject.chat(messages: messages, &streaming_block) |
| 663 | + expected_yielded_chunks = stream_chunks.map { |c| c.dig("choices", 0) || {} } |
| 664 | + expect(collected_yielded_chunks).to eq(expected_yielded_chunks) |
| 665 | + expect(collected_yielded_chunks.map { |c| c.dig("delta", "content") }.compact.join).to eq(expected_completion) |
| 666 | + end |
| 667 | + end |
| 668 | + |
622 | 669 | context "with streaming" do |
623 | 670 | let(:streamed_response_chunk) do |
624 | 671 | { |
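For context on what the new examples exercise: with the ruby-openai client, passing a Proc as the `stream:` parameter makes the client invoke that Proc once per server-sent chunk (simulated here by the stubbed `client.chat`, which calls it with the chunk hash and its bytesize), and the wrapper is then responsible for folding the per-chunk deltas, plus the trailing usage-only chunk requested via `stream_options: {include_usage: true}`, back into a single response. The sketch below illustrates that reassembly under those assumptions; `assemble_streamed_response` is a hypothetical helper for illustration, not langchainrb's actual implementation.

# Minimal sketch (hypothetical, not the library's code) of reassembling
# streamed OpenAI chunks into one chat-completion-shaped hash.
def assemble_streamed_response(chunks)
  content = +""   # mutable accumulator for the streamed text
  role = nil
  usage = nil

  chunks.each do |chunk|
    delta = chunk.dig("choices", 0, "delta") || {}   # the usage-only chunk has no "choices"
    role ||= delta["role"]
    content << delta["content"].to_s
    usage = chunk["usage"] if chunk["usage"]         # final chunk when include_usage is set
  end

  {
    "choices" => [{"message" => {"role" => role, "content" => content}}],
    "usage" => usage
  }
end

Fed the `stream_chunks` defined above, this would produce an assistant message whose content is "Why did the chicken cross the road?" together with the 5/10/15 token usage figures the specs assert on.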