# frozen_string_literal: true

require_relative "endpoint_compliance"

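# WebMock-backed mock of Ollama's /api/chat endpoint, plugged into the shared
# EndpointMock helpers from endpoint_compliance.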
class OllamaMock < EndpointMock
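  # Canned payload mirroring a complete (non-streaming) Ollama chat response;
  # the duration and token-count fields are fixed fixture values, not real
  # measurements.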
  def response(content)
    message_content = { content: content }

    {
      created_at: "2024-09-25T06:47:21.283028Z",
      model: "llama3.1",
      message: { role: "assistant" }.merge(message_content),
      done: true,
      done_reason: "stop",
      total_duration: 7_639_718_541,
      load_duration: 299_886_663,
      prompt_eval_count: 18,
      prompt_eval_duration: 220_447_000,
      eval_count: 18,
      eval_duration: 220_447_000,
    }
  end

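  # Stubs a single non-streaming completion for the given prompt.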
  def stub_response(prompt, response_text)
    WebMock
      .stub_request(:post, "http://api.ollama.ai/api/chat")
      .with(body: request_body(prompt))
      .to_return(status: 200, body: JSON.dump(response(response_text)))
  end

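  # Builds one streamed NDJSON line carrying a single content delta. The unary
  # + applies String#+@ to the #to_json result (method calls bind tighter than
  # unary operators), guaranteeing a mutable string under the
  # frozen_string_literal pragma.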
  def stream_line(delta)
    message_content = { content: delta }

    +{
      model: "llama3.1",
      created_at: "2024-09-25T06:47:21.283028Z",
      message: { role: "assistant" }.merge(message_content),
      done: false,
    }.to_json
  end

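  # Stubs the endpoint with caller-supplied raw chunks, bypassing the JSON
  # helpers above, e.g. to feed the decoder malformed or partial payloads.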
  def stub_raw(chunks)
    WebMock.stub_request(:post, "http://api.ollama.ai/api/chat").to_return(
      status: 200,
      body: chunks,
    )
  end

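  # Stubs a streaming completion: one NDJSON line per delta plus a terminating
  # done: true chunk, with the whole body split into single characters so the
  # endpoint's decoder is exercised against arbitrarily small network reads.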
  def stub_streamed_response(prompt, deltas)
    chunks = deltas.map { |delta| stream_line(delta) }

    chunks =
      (
        chunks.join("\n\n") << {
          model: "llama3.1",
          created_at: "2024-09-25T06:47:21.283028Z",
          message: {
            role: "assistant",
            content: "",
          },
          done: true,
          done_reason: "stop",
          total_duration: 7_639_718_541,
          load_duration: 299_886_663,
          prompt_eval_count: 18,
          prompt_eval_duration: 220_447_000,
          eval_count: 18,
          eval_duration: 220_447_000,
        }.to_json
      ).split("")

    WebMock
      .stub_request(:post, "http://api.ollama.ai/api/chat")
      .with(body: request_body(prompt, stream: true))
      .to_return(status: 200, body: chunks)

    yield if block_given?
  end

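  # Body the endpoint is expected to POST. Ollama's chat API streams by
  # default, so stream: false is only added for non-streaming requests.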
  def request_body(prompt, stream: false)
    model.default_options.merge(messages: prompt).tap { |b| b[:stream] = false if !stream }.to_json
  end
end

RSpec.describe DiscourseAi::Completions::Endpoints::Ollama do
  subject(:endpoint) { described_class.new(model) }

  fab!(:user)
  fab!(:model) { Fabricate(:ollama_model) }

  let(:ollama_mock) { OllamaMock.new(endpoint) }

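  # Shared compliance harness (see endpoint_compliance) that runs the generic
  # prompt, response, and logging assertions against this endpoint.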
  let(:compliance) do
    EndpointsCompliance.new(self, endpoint, DiscourseAi::Completions::Dialects::Ollama, user)
  end

  describe "#perform_completion!" do
    context "when using regular mode" do
      it "completes a trivial prompt and logs the response" do
        compliance.regular_mode_simple_prompt(ollama_mock)
      end
    end
  end

  describe "when using streaming mode" do
    context "with simple prompts" do
      it "completes a trivial prompt and logs the response" do
        compliance.streaming_mode_simple_prompt(ollama_mock)
      end
    end
  end
end