Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit 33946df

Browse files
committed
FIX: spec 🙌
1 parent 90fa0eb commit 33946df

File tree

2 files changed

+25
-31
lines changed

2 files changed

+25
-31
lines changed

‎app/services/problem_check/ai_llm_status.rb‎

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ def llm_errors
1919

2020
def try_validate(model, &blk)
2121
begin
22-
# raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
2322
blk.call
2423
nil
2524
rescue => e

‎spec/services/problem_check/ai_llm_status_spec.rb‎

Lines changed: 25 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -5,25 +5,28 @@
55
RSpec.describe ProblemCheck::AiLlmStatus do
66
subject(:check) { described_class.new }
77

8-
# let(:spec_model) do
9-
# LlmModel.new(
10-
# id: 50,
11-
# display_name: "GPT-4 Turbo",
12-
# name: "gpt-4-turbo",
13-
# provider: "open_ai",
14-
# tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer",
15-
# max_prompt_tokens: 131_072,
16-
# api_key: "invalid",
17-
# url: "https://api.openai.com/v1/chat/completions",
18-
# )
19-
# end
20-
218
fab!(:llm_model)
229

10+
let(:post_url) { "https://api.openai.com/v1/chat/completions" }
11+
let(:success_response) do
12+
{
13+
model: "gpt-4-turbo",
14+
usage: {
15+
max_prompt_tokens: 131_072,
16+
},
17+
choices: [
18+
{ message: { role: "assistant", content: "test" }, finish_reason: "stop", index: 0 },
19+
],
20+
}.to_json
21+
end
22+
23+
let(:error_response) do
24+
{ message: "API key error! Please check you have supplied the correct API key." }.to_json
25+
end
26+
2327
before do
24-
pp "Spec model: #{llm_model.inspect}"
28+
stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
2529
SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
26-
# assign_fake_provider_to(:ai_summarization_model)
2730
SiteSetting.ai_summarization_enabled = true
2831
end
2932

@@ -34,23 +37,10 @@
3437
end
3538

3639
context "with discourse-ai plugin enabled for the site" do
37-
# let(:llm_model) { LlmModel.in_use.first }
38-
3940
before { SiteSetting.discourse_ai_enabled = true }
4041

4142
it "returns a problem with an LLM model" do
42-
stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
43-
body:
44-
"{\"model\":\"gpt-4-turbo\",\"messages\":[{\"role\":\"system\",\"content\":\"You are a helpful bot\"},{\"role\":\"user\",\"content\":\"How much is 1 + 1?\"}]}",
45-
headers: {
46-
"Accept" => "*/*",
47-
"Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3",
48-
"Authorization" => "Bearer 123",
49-
"Content-Type" => "application/json",
50-
"Host" => "api.openai.com",
51-
"User-Agent" => "Ruby",
52-
},
53-
).to_return(status: 200, body: "", headers: {})
43+
stub_request(:post, post_url).to_return(status: 403, body: error_response, headers: {})
5444
message =
5545
"#{I18n.t("dashboard.problem.ai_llm_status", { model_name: llm_model.display_name, model_id: llm_model.id })}"
5646

@@ -63,11 +53,16 @@
6353
details: {
6454
model_id: llm_model.id,
6555
model_name: llm_model.display_name,
66-
error: "Forced error for testing",
56+
error: JSON.parse(error_response)["message"],
6757
},
6858
),
6959
)
7060
end
61+
62+
it "does not return a problem if the LLM models are working" do
63+
stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
64+
expect(check).to be_chill_about_it
65+
end
7166
end
7267
end
7368
end

0 commit comments

Comments (0)