Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit cc34882

Browse files
committed
FIX: rest of specs
1 parent 4b03cdc commit cc34882

File tree

6 files changed

+27
-25
lines changed

6 files changed

+27
-25
lines changed

lib/configuration/llm_enumerator.rb

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,8 @@ def self.global_usage
8787
)
8888
.each do |model_text, name, id|
8989
next if model_text.blank?
90-
model_id = model_text.split("custom:").last.to_i
91-
if model_id.present?
92-
if model_text =~ /custom:(\d+)/
93-
rval[model_id] << { type: :automation, name: name, id: id }
94-
end
95-
end
90+
model_id = model_text.to_i
91+
rval[model_id] << { type: :automation, name: name, id: id } if model_id.present?
9692
end
9793
end
9894

lib/configuration/llm_validator.rb

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,6 @@ def valid_value?(val)
3131
end
3232

3333
def run_test(val)
34-
if Rails.env.test?
35-
# In test mode, we assume the model is reachable.
36-
return true
37-
end
38-
3934
DiscourseAi::Completions::Llm
4035
.proxy(val)
4136
.generate("How much is 1 + 1?", user: nil, feature_name: "llm_validator")

spec/configuration/llm_enumerator_spec.rb

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
RSpec.describe DiscourseAi::Configuration::LlmEnumerator do
44
fab!(:fake_model)
5+
fab!(:ai_persona) { Fabricate(:ai_persona, default_llm_id: fake_model.id) }
56
fab!(:llm_model)
67
fab!(:seeded_model)
78
fab!(:automation) do
@@ -42,24 +43,32 @@
4243
describe "#global_usage" do
4344
it "returns a hash of Llm models in use globally" do
4445
assign_fake_provider_to(:ai_default_llm_model)
46+
SiteSetting.ai_helper_proofreader_persona = ai_persona.id
4547
SiteSetting.ai_helper_enabled = true
46-
expect(described_class.global_usage).to eq(fake_model.id => [{ type: :ai_helper }])
48+
expect(described_class.global_usage).to eq(
49+
fake_model.id => [{ type: :ai_helper }],
50+
fake_model.id => [
51+
{ id: ai_persona.id, name: ai_persona.name, type: :ai_persona },
52+
{ name: "Proofread text", type: :ai_helper },
53+
],
54+
)
4755
end
4856

4957
it "returns information about automation rules" do
5058
automation.fields.create!(
5159
component: "text",
5260
name: "model",
5361
metadata: {
54-
value: fake_model.id,
62+
value: llm_model.id,
5563
},
5664
target: "script",
5765
)
5866

5967
usage = described_class.global_usage
6068

6169
expect(usage).to eq(
62-
{ fake_model.id => [{ type: :automation, name: "some automation", id: automation.id }] },
70+
fake_model.id => [{ id: ai_persona.id, name: ai_persona.name, type: :ai_persona }],
71+
llm_model.id => [{ id: automation.id, name: automation.name, type: :automation }],
6372
)
6473
end
6574
end

spec/lib/completions/endpoints/nova_spec.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def encode_message(message)
2828
end
2929

3030
it "should be able to make a simple request" do
31-
proxy = DiscourseAi::Completions::Llm.proxy(model)
31+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
3232

3333
content = {
3434
"output" => {
@@ -90,7 +90,7 @@ def encode_message(message)
9090

9191
stub_request(:post, stream_url).to_return(status: 200, body: messages.join)
9292

93-
proxy = DiscourseAi::Completions::Llm.proxy(model)
93+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
9494
responses = []
9595
proxy.generate("Hello!", user: user) { |partial| responses << partial }
9696

@@ -104,7 +104,7 @@ def encode_message(message)
104104
#model.provider_params["disable_native_tools"] = true
105105
#model.save!
106106

107-
proxy = DiscourseAi::Completions::Llm.proxy(model)
107+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
108108
prompt =
109109
DiscourseAi::Completions::Prompt.new(
110110
"You are a helpful assistant.",

spec/requests/admin/ai_llms_controller_spec.rb

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -473,16 +473,16 @@
473473
error_type: "validation",
474474
}
475475

476-
WebMock.stub_request(:post, test_attrs[:url]).to_return(
477-
status: 422,
478-
body: error_message.to_json,
479-
)
476+
error =
477+
DiscourseAi::Completions::Endpoints::Base::CompletionFailed.new(error_message.to_json)
480478

481-
get "/admin/plugins/discourse-ai/ai-llms/test.json", params: { ai_llm: test_attrs }
479+
DiscourseAi::Completions::Llm.with_prepared_responses([error]) do
480+
get "/admin/plugins/discourse-ai/ai-llms/test.json", params: { ai_llm: test_attrs }
482481

483-
expect(response).to be_successful
484-
expect(response.parsed_body["success"]).to eq(false)
485-
expect(response.parsed_body["error"]).to eq(error_message.to_json)
482+
expect(response).to be_successful
483+
expect(response.parsed_body["success"]).to eq(false)
484+
expect(response.parsed_body["error"]).to eq(error_message.to_json)
485+
end
486486
end
487487
end
488488
end

spec/services/problem_check/ai_llm_status_spec.rb

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
subject(:check) { described_class.new }
77

88
fab!(:llm_model)
9+
fab!(:ai_persona) { Fabricate(:ai_persona, default_llm_id: llm_model.id) }
910

1011
let(:post_url) { "https://api.openai.com/v1/chat/completions" }
1112
let(:success_response) do
@@ -27,6 +28,7 @@
2728
before do
2829
stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
2930
assign_fake_provider_to(:ai_default_llm_model)
31+
SiteSetting.ai_summarization_persona = ai_persona.id
3032
SiteSetting.ai_summarization_enabled = true
3133
end
3234

0 commit comments

Comments (0)