|
5 | 5 | RSpec.describe ProblemCheck::AiLlmStatus do |
6 | 6 | subject(:check) { described_class.new } |
7 | 7 |
8 | | - # let(:spec_model) do |
9 | | - # LlmModel.new( |
10 | | - # id: 50, |
11 | | - # display_name: "GPT-4 Turbo", |
12 | | - # name: "gpt-4-turbo", |
13 | | - # provider: "open_ai", |
14 | | - # tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer", |
15 | | - # max_prompt_tokens: 131_072, |
16 | | - # api_key: "invalid", |
17 | | - # url: "https://api.openai.com/v1/chat/completions", |
18 | | - # ) |
19 | | - # end |
20 | | - |
21 | 8 | fab!(:llm_model) |
22 | 9 |
| 10 | + let(:post_url) { "https://api.openai.com/v1/chat/completions" } |
| 11 | + let(:success_response) do |
| 12 | + { |
| 13 | + model: "gpt-4-turbo", |
| 14 | + usage: { |
| 15 | + max_prompt_tokens: 131_072, |
| 16 | + }, |
| 17 | + choices: [ |
| 18 | + { message: { role: "assistant", content: "test" }, finish_reason: "stop", index: 0 }, |
| 19 | + ], |
| 20 | + }.to_json |
| 21 | + end |
| 22 | + |
| 23 | + let(:error_response) do |
| 24 | + { message: "API key error! Please check you have supplied the correct API key." }.to_json |
| 25 | + end |
| 26 | + |
23 | 27 | before do |
24 | | - pp "Spec model: #{llm_model.inspect}" |
| 28 | + stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {}) |
25 | 29 | SiteSetting.ai_summarization_model = "custom:#{llm_model.id}" |
26 | | - # assign_fake_provider_to(:ai_summarization_model) |
27 | 30 | SiteSetting.ai_summarization_enabled = true |
28 | 31 | end |
29 | 32 |
|
34 | 37 | end |
35 | 38 |
36 | 39 | context "with discourse-ai plugin enabled for the site" do |
37 | | - # let(:llm_model) { LlmModel.in_use.first } |
38 | | - |
39 | 40 | before { SiteSetting.discourse_ai_enabled = true } |
40 | 41 |
41 | 42 | it "returns a problem with an LLM model" do |
42 | | - stub_request(:post, "https://api.openai.com/v1/chat/completions").with( |
43 | | - body: |
44 | | - "{\"model\":\"gpt-4-turbo\",\"messages\":[{\"role\":\"system\",\"content\":\"You are a helpful bot\"},{\"role\":\"user\",\"content\":\"How much is 1 + 1?\"}]}", |
45 | | - headers: { |
46 | | - "Accept" => "*/*", |
47 | | - "Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3", |
48 | | - "Authorization" => "Bearer 123", |
49 | | - "Content-Type" => "application/json", |
50 | | - "Host" => "api.openai.com", |
51 | | - "User-Agent" => "Ruby", |
52 | | - }, |
53 | | - ).to_return(status: 200, body: "", headers: {}) |
| 43 | + stub_request(:post, post_url).to_return(status: 403, body: error_response, headers: {}) |
54 | 44 | message = |
55 | 45 | "#{I18n.t("dashboard.problem.ai_llm_status", { model_name: llm_model.display_name, model_id: llm_model.id })}" |
56 | 46 |
|
63 | 53 | details: { |
64 | 54 | model_id: llm_model.id, |
65 | 55 | model_name: llm_model.display_name, |
66 | | - error: "Forced error for testing", |
| 56 | + error: JSON.parse(error_response)["message"], |
67 | 57 | }, |
68 | 58 | ), |
69 | 59 | ) |
70 | 60 | end |
| 61 | + |
| 62 | + it "does not return a problem if the LLM models are working" do |
| 63 | + stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {}) |
| 64 | + expect(check).to be_chill_about_it |
| 65 | + end |
71 | 66 | end |
72 | 67 | end |
73 | 68 | end |