app/models/llm_model.rb (5 additions, 0 deletions)
@@ -13,6 +13,11 @@ class LlmModel < ActiveRecord::Base
  validates_presence_of :name, :api_key
  validates :max_prompt_tokens, numericality: { greater_than: 0 }
  validate :required_provider_params
  scope :in_use,
        -> do
          model_ids = DiscourseAi::Configuration::LlmEnumerator.global_usage.keys
          where(id: model_ids)
        end

  def self.provider_params
    {
app/services/problem_check/ai_llm_status.rb (58 additions, 0 deletions)
@@ -0,0 +1,58 @@
# frozen_string_literal: true

class ProblemCheck::AiLlmStatus < ProblemCheck
  self.priority = "high"
  self.perform_every = 6.hours

  def call
    llm_errors
  end

  def base_path
    Discourse.base_path
  end
Comment on lines +11 to +13
Member Author

Not sure why, but wrapping Discourse.base_path in a method like this is the only way I can get it to work without seeing:

Translation missing: en.dashboard.problem.ai_llm_status

Calling Discourse.base_path directly inside try_validate results in the above error.
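
For reference, a rough sketch of the inline variant described above (a hypothetical reconstruction for illustration, not code from this PR):

      # Hypothetical: calling Discourse.base_path directly inside try_validate,
      # which reportedly produced "Translation missing: en.dashboard.problem.ai_llm_status".
      message =
        "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: Discourse.base_path, model_name: model.display_name, model_id: model.id })}"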


  private

  def llm_errors
    return [] if !SiteSetting.discourse_ai_enabled
    LlmModel.in_use.find_each.filter_map do |model|
      try_validate(model) { validator.run_test(model) }
    end
  end

  def try_validate(model, &blk)
    begin
      blk.call
      nil
    rescue => e
      error_message = parse_error_message(e.message)
      message =
        "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: base_path, model_name: model.display_name, model_id: model.id })}"
Contributor

Sorry. Late to the party. This shouldn't really need to be wrapped in an interpolation string? 🙂
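
In other words, something along these lines (a sketch of the suggested simplification, not part of the merged diff), since I18n.t already returns a String:

      # Sketch: drop the surrounding "#{...}" wrapper and assign the I18n.t result directly.
      message =
        I18n.t(
          "dashboard.problem.ai_llm_status",
          base_path: base_path,
          model_name: model.display_name,
          model_id: model.id,
        )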


      Problem.new(
        message,
        priority: "high",
        identifier: "ai_llm_status",
        target: model.id,
        details: {
          model_id: model.id,
          model_name: model.display_name,
          error: error_message,
        },
      )
    end
  end

  def validator
    @validator ||= DiscourseAi::Configuration::LlmValidator.new
  end

  def parse_error_message(message)
    begin
      JSON.parse(message)["message"]
    rescue JSON::ParserError
      message.to_s
    end
  end
end
config/locales/server.en.yml (3 additions, 0 deletions)
@@ -453,3 +453,6 @@ en:
no_default_llm: The persona must have a default_llm defined.
user_not_allowed: The user is not allowed to participate in the topic.
prompt_message_length: The message %{idx} is over the 1000 character limit.
dashboard:
problem:
ai_llm_status: "The LLM model: %{model_name} is encountering issues. Please check the <a href='%{base_path}/admin/plugins/discourse-ai/ai-llms/%{model_id}'>model's configuration page</a>."
Member

BTW, it's actually recommended to build the URL in Ruby, and have a single %{url} parameter. If the URL ever changes, translators don't need to update their translations.
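
A rough sketch of that approach (hypothetical, not what was merged), with the locale string reduced to a single %{url} placeholder:

      # Hypothetical: build the full URL in Ruby so translators only see %{url}.
      url = "#{Discourse.base_path}/admin/plugins/discourse-ai/ai-llms/#{model.id}"
      message = I18n.t("dashboard.problem.ai_llm_status", model_name: model.display_name, url: url)
      # server.en.yml would then read:
      #   ai_llm_status: "The LLM model: %{model_name} is encountering issues. Please check the <a href='%{url}'>model's configuration page</a>."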

Member Author

@keegangeorge (Dec 13, 2024)

@gschlager When I change it to that, I get this and it no longer works. I'm not sure why it's so finicky.

[Screenshot 2024-12-13 at 12:17:12]

When I just pass the base_path separately, it shows correctly:

[Screenshot 2024-12-13 at 12:23:17]

Member

I wonder if it's the way we cache problem checks? I ran into it earlier in the week when working on encrypt: (internal /t/128444/7)

Member Author

Hmm, @davidtaylorhq, I still consistently see the translation error when using %{url} (even after clearing the admin notice from the database and refreshing to run a new problem check).

plugin.rb (2 additions, 0 deletions)
@@ -75,6 +75,8 @@ def self.public_asset_path(name)
    DiscourseAi::AiModeration::EntryPoint.new,
  ].each { |a_module| a_module.inject_into(self) }

  register_problem_check ProblemCheck::AiLlmStatus

  register_reviewable_type ReviewableAiChatMessage
  register_reviewable_type ReviewableAiPost

spec/services/problem_check/ai_llm_status_spec.rb (68 additions, 0 deletions)
@@ -0,0 +1,68 @@
# frozen_string_literal: true

require "rails_helper"

RSpec.describe ProblemCheck::AiLlmStatus do
  subject(:check) { described_class.new }

  fab!(:llm_model)

  let(:post_url) { "https://api.openai.com/v1/chat/completions" }
  let(:success_response) do
    {
      model: "gpt-4-turbo",
      usage: {
        max_prompt_tokens: 131_072,
      },
      choices: [
        { message: { role: "assistant", content: "test" }, finish_reason: "stop", index: 0 },
      ],
    }.to_json
  end

  let(:error_response) do
    { message: "API key error! Please check you have supplied the correct API key." }.to_json
  end

  before do
    stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
    SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
    SiteSetting.ai_summarization_enabled = true
  end

  describe "#call" do
    it "does nothing if discourse-ai plugin disabled" do
      SiteSetting.discourse_ai_enabled = false
      expect(check).to be_chill_about_it
    end

    context "with discourse-ai plugin enabled for the site" do
      before { SiteSetting.discourse_ai_enabled = true }

      it "returns a problem with an LLM model" do
        stub_request(:post, post_url).to_return(status: 403, body: error_response, headers: {})
        message =
          "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: Discourse.base_path, model_name: llm_model.display_name, model_id: llm_model.id })}"

        expect(described_class.new.call).to contain_exactly(
          have_attributes(
            identifier: "ai_llm_status",
            target: llm_model.id,
            priority: "high",
            message: message,
            details: {
              model_id: llm_model.id,
              model_name: llm_model.display_name,
              error: JSON.parse(error_response)["message"],
            },
          ),
        )
      end

      it "does not return a problem if the LLM models are working" do
        stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
        expect(check).to be_chill_about_it
      end
    end
  end
end