Commit f162ab6

Merge pull request #4 from scientist-labs/simplecov
Add SimpleCov and improve test coverage
2 parents: 3dbf637 + 3087e7e

4 files changed: +158 −0 lines


ruby_llm-red_candle.gemspec

Lines changed: 1 addition & 0 deletions
@@ -44,4 +44,5 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "rspec", "~> 3.12"
   spec.add_development_dependency "rubocop", "~> 1.0"
   spec.add_development_dependency "rubocop-rspec", "~> 3.0"
+  spec.add_development_dependency "simplecov", "~> 0.22"
 end
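Side note, not part of this diff: for bundle install to pick up the new development dependency, the project's Gemfile is assumed to delegate to the gemspec via Bundler's gemspec directive, along these lines:

    # Gemfile (assumed layout, not included in this commit)
    source "https://rubygems.org"

    # Loads runtime and development dependencies declared in
    # ruby_llm-red_candle.gemspec, including the simplecov entry added above.
    gemspec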

spec/ruby_llm/red_candle/chat_spec.rb

Lines changed: 53 additions & 0 deletions
@@ -282,6 +282,59 @@
       expect(final_chunk).to be_a(RubyLLM::Chunk)
       expect(final_chunk.content).to eq("")
     end
+
+    it "returns a Message with accumulated content and token estimates" do
+      tokens = %w[Hello world !]
+
+      allow(mock_model).to receive(:generate_stream) do |_prompt, config:, &block|
+        tokens.each { |token| block.call(token) }
+      end
+
+      payload = {
+        messages: messages,
+        model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+        temperature: 0.7
+      }
+
+      result = provider.perform_streaming_completion!(payload) { |_chunk| }
+
+      expect(result).to be_a(RubyLLM::Message)
+      expect(result.role).to eq(:assistant)
+      expect(result.content).to eq("Helloworld!")
+      expect(result.model_id).to eq("TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF")
+      expect(result.input_tokens).to be_a(Integer)
+      expect(result.output_tokens).to be_a(Integer)
+    end
+
+    it "wraps streaming errors with helpful messages" do
+      allow(mock_model).to receive(:generate_stream).and_raise(StandardError, "stream error")
+
+      payload = {
+        messages: messages,
+        model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+        temperature: 0.7
+      }
+
+      expect { provider.perform_streaming_completion!(payload) { |_| } }.to raise_error(
+        RubyLLM::Error,
+        /Generation failed for TheBloke\/TinyLlama.*stream error/
+      )
+    end
+
+    it "provides helpful message for OOM errors during streaming" do
+      allow(mock_model).to receive(:generate_stream).and_raise(StandardError, "out of memory")
+
+      payload = {
+        messages: messages,
+        model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+        temperature: 0.7
+      }
+
+      expect { provider.perform_streaming_completion!(payload) { |_| } }.to raise_error(
+        RubyLLM::Error,
+        /Out of memory.*Try using a smaller model/m
+      )
+    end
   end
 
   describe "message formatting" do
New file: spec for RubyLLM::RedCandle::Streaming

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+require "spec_helper"
+
+RSpec.describe RubyLLM::RedCandle::Streaming do
+  let(:config) { RubyLLM::Configuration.new }
+  let(:provider) { RubyLLM::RedCandle::Provider.new(config) }
+  let(:mock_model) { instance_double(Candle::LLM) }
+
+  before do
+    allow(provider).to receive(:ensure_model_loaded!).and_return(mock_model)
+    allow(mock_model).to receive(:respond_to?).with(:apply_chat_template).and_return(true)
+    allow(mock_model).to receive(:apply_chat_template).and_return("formatted prompt")
+  end
+
+  describe "#stream" do
+    let(:messages) { [{ role: "user", content: "Test message" }] }
+
+    context "when stream: true" do
+      it "calls perform_streaming_completion!" do
+        tokens = %w[Hello world]
+
+        allow(mock_model).to receive(:generate_stream) do |_prompt, config:, &block|
+          tokens.each { |token| block.call(token) }
+        end
+
+        payload = {
+          messages: messages,
+          model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+          temperature: 0.7,
+          stream: true
+        }
+
+        chunks = []
+        provider.stream(payload) { |chunk| chunks << chunk }
+
+        # Should receive token chunks plus final empty chunk
+        expect(chunks.size).to eq(3)
+        expect(chunks[0].content).to eq("Hello")
+        expect(chunks[1].content).to eq("world")
+        expect(chunks[2].content).to eq("")
+      end
+    end
+
+    context "when stream: false" do
+      it "yields a single chunk with complete result" do
+        allow(mock_model).to receive(:generate).and_return("Complete response")
+
+        payload = {
+          messages: messages,
+          model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+          temperature: 0.7,
+          stream: false
+        }
+
+        chunks = []
+        provider.stream(payload) { |chunk| chunks << chunk }
+
+        expect(chunks.size).to eq(1)
+        expect(chunks[0][:content]).to eq("Complete response")
+        expect(chunks[0][:role]).to eq("assistant")
+      end
+    end
+
+    context "when stream is nil (defaults to non-streaming)" do
+      it "yields a single chunk with complete result" do
+        allow(mock_model).to receive(:generate).and_return("Complete response")
+
+        payload = {
+          messages: messages,
+          model: "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
+          temperature: 0.7
+        }
+
+        chunks = []
+        provider.stream(payload) { |chunk| chunks << chunk }
+
+        expect(chunks.size).to eq(1)
+        expect(chunks[0][:content]).to eq("Complete response")
+      end
+    end
+  end
+
+  describe "#stream_processor" do
+    it "returns nil for compatibility" do
+      expect(provider.send(:stream_processor)).to be_nil
+    end
+  end
+
+  describe "#process_stream_response" do
+    it "returns the response unchanged" do
+      response = { content: "test" }
+      expect(provider.send(:process_stream_response, response)).to eq(response)
+    end
+  end
+end
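Taken together, these examples fix the dispatch contract of #stream: with stream: true it delegates to perform_streaming_completion! and yields per-token chunks plus a final empty chunk; with stream: false or no stream key it generates the full response once and yields a single assistant hash. A minimal sketch consistent with that contract (internals are assumptions, not the gem's actual code):

    # Illustrative only; the real module may differ.
    def stream(payload, &block)
      if payload[:stream]
        perform_streaming_completion!(payload, &block)
      else
        model = ensure_model_loaded!
        prompt = model.apply_chat_template(payload[:messages])  # mocked in the specs above
        content = model.generate(prompt)                         # call shape assumed
        block.call({ content: content, role: "assistant" })
      end
    end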

spec/spec_helper.rb

Lines changed: 8 additions & 0 deletions
@@ -1,5 +1,13 @@
 # frozen_string_literal: true
 
+require "simplecov"
+SimpleCov.start do
+  add_filter "/spec/"
+  add_group "Provider", "lib/ruby_llm/red_candle"
+  enable_coverage :branch
+  minimum_coverage line: 80, branch: 70
+end
+
 require "bundler/setup"
 require "ruby_llm-red_candle"
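Two things worth noting about this configuration: SimpleCov only tracks files loaded after SimpleCov.start, which is why the block sits above the require of the library itself, and minimum_coverage makes the suite fail when line coverage falls below 80% or branch coverage below 70%. As an optional extension (not part of this commit), SimpleCov can also reject any relative drop between runs:

    # Optional variant, not included in this diff.
    require "simplecov"
    SimpleCov.start do
      add_filter "/spec/"
      add_group "Provider", "lib/ruby_llm/red_candle"
      enable_coverage :branch
      minimum_coverage line: 80, branch: 70
      refuse_coverage_drop  # fail the run if coverage decreases versus the last recorded run
    end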
