FEATURE: Add periodic problem checks for each LLM in use #1020
Changes from all commits: fdc8c70, 61d84b0, 8f10c3d, 8947a7d, ec20ca5, 34f5b15, 90fa0eb, 33946df, 18598a1
New file (`@@ -0,0 +1,58 @@`) adding the problem check itself:

```ruby
# frozen_string_literal: true

class ProblemCheck::AiLlmStatus < ProblemCheck
  self.priority = "high"
  self.perform_every = 6.hours

  def call
    llm_errors
  end

  def base_path
    Discourse.base_path
  end

  private

  def llm_errors
    return [] if !SiteSetting.discourse_ai_enabled

    # Run a test completion against every LLM currently in use and collect
    # a Problem for each model whose test raises.
    LlmModel.in_use.find_each.filter_map do |model|
      try_validate(model) { validator.run_test(model) }
    end
  end

  def try_validate(model, &blk)
    begin
      blk.call
      nil
    rescue => e
      error_message = parse_error_message(e.message)
      message =
        "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: base_path, model_name: model.display_name, model_id: model.id })}"

      Problem.new(
        message,
        priority: "high",
        identifier: "ai_llm_status",
        target: model.id,
        details: {
          model_id: model.id,
          model_name: model.display_name,
          error: error_message,
        },
      )
    end
  end

  def validator
    @validator ||= DiscourseAi::Configuration::LlmValidator.new
  end

  # Provider errors often arrive as JSON; surface just the human-readable
  # message when they do.
  def parse_error_message(message)
    begin
      JSON.parse(message)["message"]
    rescue JSON::ParserError
      message.to_s
    end
  end
end
```

**Contributor** (on the `message = "#{I18n.t(...)}"` line):

> Sorry. Late to the party. This shouldn't really need to be wrapped in an interpolation string? 🙂
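What the reviewer means, as a sketch (not part of this diff): `I18n.t` already returns a `String`, so the `"#{...}"` wrapper is redundant and can be dropped:

```ruby
# Equivalent without the interpolation wrapper — I18n.t already returns
# a String:
message =
  I18n.t(
    "dashboard.problem.ai_llm_status",
    base_path: base_path,
    model_name: model.display_name,
    model_id: model.id,
  )
```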
Locale file change adding the dashboard problem message (indentation reconstructed; `dashboard.problem` sits directly under `en:`):

```diff
@@ -453,3 +453,6 @@ en:
     no_default_llm: The persona must have a default_llm defined.
     user_not_allowed: The user is not allowed to participate in the topic.
     prompt_message_length: The message %{idx} is over the 1000 character limit.
+  dashboard:
+    problem:
+      ai_llm_status: "The LLM model: %{model_name} is encountering issues. Please check the <a href='%{base_path}/admin/plugins/discourse-ai/ai-llms/%{model_id}'>model's configuration page</a>."
```
**Member (@gschlager):**

> BTW, it's actually recommended to build the URL in Ruby and have a single `%{url}` parameter. If the URL ever changes, translators don't need to update their translations.
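A sketch of that suggestion, for illustration only (hypothetical — this is not what the PR merged, and `message_for` is an invented helper name):

```ruby
# Hypothetical single-%{url} variant: build the admin URL in Ruby so the
# translation needs only one URL placeholder.
#
# Corresponding locale entry (YAML) would become:
#   ai_llm_status: "The LLM model: %{model_name} is encountering issues. Please check the <a href='%{url}'>model's configuration page</a>."
def message_for(model)
  url = "#{Discourse.base_path}/admin/plugins/discourse-ai/ai-llms/#{model.id}"
  I18n.t("dashboard.problem.ai_llm_status", url: url, model_name: model.display_name)
end
```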
**Member (Author):**

> @gschlager When I change it to that I get this and it no longer works: [screenshot omitted]. I'm not sure why it's so finicky. And when I just pass the `base_path` separately: [screenshot omitted]
**Member (@davidtaylorhq):**

> I wonder if it's the way we cache problem checks? I ran into it earlier in the week when working on encrypt: (internal …)
**Member (Author):**

> Hmm, @davidtaylorhq, I still consistently see the translation error when using […]
New spec file (`@@ -0,0 +1,68 @@`):

```ruby
# frozen_string_literal: true

require "rails_helper"

RSpec.describe ProblemCheck::AiLlmStatus do
  subject(:check) { described_class.new }

  fab!(:llm_model)

  let(:post_url) { "https://api.openai.com/v1/chat/completions" }

  let(:success_response) do
    {
      model: "gpt-4-turbo",
      usage: {
        max_prompt_tokens: 131_072,
      },
      choices: [
        { message: { role: "assistant", content: "test" }, finish_reason: "stop", index: 0 },
      ],
    }.to_json
  end

  let(:error_response) do
    { message: "API key error! Please check you have supplied the correct API key." }.to_json
  end

  before do
    stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
    SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
    SiteSetting.ai_summarization_enabled = true
  end

  describe "#call" do
    it "does nothing if discourse-ai plugin disabled" do
      SiteSetting.discourse_ai_enabled = false
      # be_chill_about_it: Discourse's matcher asserting the check reports
      # no problems.
      expect(check).to be_chill_about_it
    end

    context "with discourse-ai plugin enabled for the site" do
      before { SiteSetting.discourse_ai_enabled = true }

      it "returns a problem with an LLM model" do
        stub_request(:post, post_url).to_return(status: 403, body: error_response, headers: {})
        message =
          "#{I18n.t("dashboard.problem.ai_llm_status", { base_path: Discourse.base_path, model_name: llm_model.display_name, model_id: llm_model.id })}"

        expect(described_class.new.call).to contain_exactly(
          have_attributes(
            identifier: "ai_llm_status",
            target: llm_model.id,
            priority: "high",
            message: message,
            details: {
              model_id: llm_model.id,
              model_name: llm_model.display_name,
              error: JSON.parse(error_response)["message"],
            },
          ),
        )
      end

      it "does not return a problem if the LLM models are working" do
        stub_request(:post, post_url).to_return(status: 200, body: success_response, headers: {})
        expect(check).to be_chill_about_it
      end
    end
  end
end
```
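For anyone poking at this locally, one quick way to exercise the check outside the scheduler (a sketch, assuming a Rails console with the plugin enabled and at least one `LlmModel` in use):

```ruby
# In `rails c`: run the check directly and inspect the Problem objects
# the admin dashboard would render.
ProblemCheck::AiLlmStatus.new.call.each do |problem|
  puts "#{problem.identifier} (#{problem.priority}): #{problem.message}"
  puts problem.details.inspect
end
```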


There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Not sure why, but calling
Discourse.base_pathby putting it in a function is the only way I can get it to work without seeing:Calling
Discourse.base_pathdirectly insidetry_validateresults in the above error.
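For concreteness, a reconstruction of the two forms being contrasted (hypothetical on the failing side — the actual error output was a screenshot that isn't preserved here; `model` is `try_validate`'s argument):

```ruby
# Form that works in this PR: Discourse.base_path is read through a
# helper method on the check.
def base_path
  Discourse.base_path
end

message =
  I18n.t("dashboard.problem.ai_llm_status", base_path: base_path, model_name: model.display_name, model_id: model.id)

# Form reported to raise a translation error inside try_validate:
# message =
#   I18n.t("dashboard.problem.ai_llm_status", base_path: Discourse.base_path, model_name: model.display_name, model_id: model.id)
```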