Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit 90fa0eb

Browse files
committed
WIP: update spec
1 parent 34f5b15 commit 90fa0eb

File tree

2 files changed

+32
-4
lines changed

2 files changed

+32
-4
lines changed

app/services/problem_check/ai_llm_status.rb

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,14 @@ def call
1212

1313
def llm_errors
1414
return [] if !SiteSetting.discourse_ai_enabled
15-
1615
LlmModel.in_use.find_each.filter_map do |model|
1716
try_validate(model) { validator.run_test(model) }
1817
end
1918
end
2019

2120
def try_validate(model, &blk)
2221
begin
23-
raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
22+
# raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
2423
blk.call
2524
nil
2625
rescue => e

spec/services/problem_check/ai_llm_status_spec.rb

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,25 @@
55
RSpec.describe ProblemCheck::AiLlmStatus do
66
subject(:check) { described_class.new }
77

8+
# let(:spec_model) do
9+
# LlmModel.new(
10+
# id: 50,
11+
# display_name: "GPT-4 Turbo",
12+
# name: "gpt-4-turbo",
13+
# provider: "open_ai",
14+
# tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer",
15+
# max_prompt_tokens: 131_072,
16+
# api_key: "invalid",
17+
# url: "https://api.openai.com/v1/chat/completions",
18+
# )
19+
# end
20+
21+
fab!(:llm_model)
22+
823
before do
9-
assign_fake_provider_to(:ai_summarization_model)
24+
pp "Spec model: #{llm_model.inspect}"
25+
SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
26+
# assign_fake_provider_to(:ai_summarization_model)
1027
SiteSetting.ai_summarization_enabled = true
1128
end
1229

@@ -17,11 +34,23 @@
1734
end
1835

1936
context "with discourse-ai plugin enabled for the site" do
20-
let(:llm_model) { LlmModel.in_use.first }
37+
# let(:llm_model) { LlmModel.in_use.first }
2138

2239
before { SiteSetting.discourse_ai_enabled = true }
2340

2441
it "returns a problem with an LLM model" do
42+
stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
43+
body:
44+
"{\"model\":\"gpt-4-turbo\",\"messages\":[{\"role\":\"system\",\"content\":\"You are a helpful bot\"},{\"role\":\"user\",\"content\":\"How much is 1 + 1?\"}]}",
45+
headers: {
46+
"Accept" => "*/*",
47+
"Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3",
48+
"Authorization" => "Bearer 123",
49+
"Content-Type" => "application/json",
50+
"Host" => "api.openai.com",
51+
"User-Agent" => "Ruby",
52+
},
53+
).to_return(status: 200, body: "", headers: {})
2554
message =
2655
"#{I18n.t("dashboard.problem.ai_llm_status", { model_name: llm_model.display_name, model_id: llm_model.id })}"
2756

0 commit comments

Comments (0)