1 change: 1 addition & 0 deletions config/locales/server.en.yml
@@ -4,6 +4,7 @@ en:
flag_types:
review: "Add post to review queue"
spam: "Flag as spam and hide post"
spam_silence: "Flag as spam, hide post and silence user"
scriptables:
llm_triage:
title: Triage posts using AI
31 changes: 27 additions & 4 deletions lib/ai_bot/playground.rb
@@ -78,11 +78,16 @@ def self.schedule_reply(post)
bot_user = nil
mentioned = nil

all_llm_user_ids = LlmModel.joins(:user).pluck("users.id")
all_llm_users =
LlmModel
.where(enabled_chat_bot: true)
.joins(:user)
.pluck("users.id", "users.username_lower")

if post.topic.private_message?
# this is an edge case, you started a PM with a different bot
bot_user = post.topic.topic_allowed_users.where(user_id: all_llm_user_ids).first&.user
bot_user =
post.topic.topic_allowed_users.where(user_id: all_llm_users.map(&:first)).first&.user
bot_user ||=
post
.topic
@@ -92,14 +97,17 @@ def self.schedule_reply(post)
&.user
end

if mentionables.present?
mentions = nil
if mentionables.present? || (bot_user && post.topic.private_message?)
mentions = post.mentions.map(&:downcase)

# in case we are replying to a post by a bot
if post.reply_to_post_number && post.reply_to_post&.user
mentions << post.reply_to_post.user.username_lower
end
end

if mentionables.present?
mentioned = mentionables.find { |mentionable| mentions.include?(mentionable[:username]) }

# direct PM to mentionable
@@ -117,7 +125,9 @@ end
end

if bot_user
persona_id = mentioned&.dig(:id) || post.topic.custom_fields["ai_persona_id"]
topic_persona_id = post.topic.custom_fields["ai_persona_id"]
persona_id = mentioned&.dig(:id) || topic_persona_id

persona = nil

if persona_id
@@ -130,6 +140,19 @@ def self.schedule_reply(post)
DiscourseAi::AiBot::Personas::Persona.find_by(user: post.user, name: persona_name)
end

# edge case, llm was mentioned in an ai persona conversation
if persona_id == topic_persona_id.to_i && post.topic.private_message? && persona &&
all_llm_users.present?
if !persona.force_default_llm && mentions.present?
mentioned_llm_user_id, _ =
all_llm_users.find { |id, username| mentions.include?(username) }

if mentioned_llm_user_id
bot_user = User.find_by(id: mentioned_llm_user_id) || bot_user
end
end
end

persona ||= DiscourseAi::AiBot::Personas::General

bot_user = User.find(persona.user_id) if persona && persona.force_default_llm
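The heart of the new routing above is the lookup over all_llm_users, which now carries [user_id, username_lower] pairs for every enabled chat bot. A minimal pure-Ruby sketch of that lookup, using illustrative values rather than real data from this PR:

# all_llm_users mirrors the shape produced by
# LlmModel.where(enabled_chat_bot: true).joins(:user).pluck("users.id", "users.username_lower")
all_llm_users = [[101, "gpt4_bot"], [102, "claude_bot"]]
mentions = ["claude_bot"]

# only an enabled LLM companion user that was actually mentioned can take over the reply
mentioned_llm_user_id, _username =
  all_llm_users.find { |_id, username| mentions.include?(username) }

mentioned_llm_user_id # => 102; schedule_reply then swaps bot_user to that companion user,
                      # unless the persona has force_default_llm set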
4 changes: 4 additions & 0 deletions lib/automation.rb
@@ -6,6 +6,10 @@ def self.flag_types
[
{ id: "review", translated_name: I18n.t("discourse_automation.ai.flag_types.review") },
{ id: "spam", translated_name: I18n.t("discourse_automation.ai.flag_types.spam") },
{
id: "spam_silence",
translated_name: I18n.t("discourse_automation.ai.flag_types.spam_silence"),
},
]
end
def self.available_models
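Assuming the locale entry added above is loaded and flag_types is exposed on the plugin's automation module (the module path is not shown in this hunk), the new option resolves roughly as in this sketch of a console session:

# illustrative only; key path taken from the I18n.t call in this file
I18n.t("discourse_automation.ai.flag_types.spam_silence")
# => "Flag as spam, hide post and silence user"

DiscourseAi::Automation.flag_types.map { |t| t[:id] }
# => ["review", "spam", "spam_silence"]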
4 changes: 3 additions & 1 deletion lib/automation/llm_triage.rb
@@ -83,14 +83,16 @@ def self.handle(
.sub("%%AUTOMATION_ID%%", automation&.id.to_s)
.sub("%%AUTOMATION_NAME%%", automation&.name.to_s)

if flag_type == :spam
if flag_type == :spam || flag_type == :spam_silence
PostActionCreator.new(
Discourse.system_user,
post,
PostActionType.types[:spam],
message: score_reason,
queue_for_review: true,
).perform

SpamRule::AutoSilence.new(post.user, post).silence_user if flag_type == :spam_silence
else
reviewable =
ReviewablePost.needs_review!(target: post, created_by: Discourse.system_user)
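In plain terms, :spam_silence reuses the existing spam path (flag as spam, hide the post, queue it for review) and additionally silences the author, while any other flag type still creates a plain review item. A simplified, standalone sketch of that branching (not the actual implementation, which goes through PostActionCreator and SpamRule::AutoSilence):

# illustrative control flow only
def triage_actions(flag_type)
  if flag_type == :spam || flag_type == :spam_silence
    actions = [:flag_as_spam, :hide_post, :queue_for_review]
    actions << :silence_user if flag_type == :spam_silence
    actions
  else
    [:queue_post_for_review]
  end
end

triage_actions(:spam_silence)
# => [:flag_as_spam, :hide_post, :queue_for_review, :silence_user]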
59 changes: 59 additions & 0 deletions spec/lib/modules/ai_bot/playground_spec.rb
@@ -622,6 +622,65 @@
expect(post.topic.posts.last.post_number).to eq(1)
end

it "allows swapping a llm mid conversation using a mention" do
SiteSetting.ai_bot_enabled = true

post = nil
DiscourseAi::Completions::Llm.with_prepared_responses(
["Yes I can", "Magic Title"],
llm: "custom:#{claude_2.id}",
) do
post =
create_post(
title: "I just made a PM",
raw: "Hey there #{persona.user.username}, can you help me?",
target_usernames: "#{user.username},#{persona.user.username}",
archetype: Archetype.private_message,
user: admin,
)
end

post.topic.custom_fields["ai_persona_id"] = persona.id
post.topic.save_custom_fields

llm2 = Fabricate(:llm_model, enabled_chat_bot: true)

llm2.toggle_companion_user

DiscourseAi::Completions::Llm.with_prepared_responses(
["Hi from bot two"],
llm: "custom:#{llm2.id}",
) do
create_post(
user: admin,
raw: "hi @#{llm2.user.username.capitalize} how are you",
topic_id: post.topic_id,
)
end

last_post = post.topic.reload.posts.order("id desc").first
expect(last_post.raw).to eq("Hi from bot two")
expect(last_post.user_id).to eq(persona.user_id)

# tether llm, so it can no longer be switched
persona.update!(force_default_llm: true, default_llm: "custom:#{claude_2.id}")

DiscourseAi::Completions::Llm.with_prepared_responses(
["Hi from bot one"],
llm: "custom:#{claude_2.id}",
) do
create_post(
user: admin,
raw: "hi @#{llm2.user.username.capitalize} how are you",
topic_id: post.topic_id,
)
end

last_post = post.topic.reload.posts.order("id desc").first
expect(last_post.raw).to eq("Hi from bot one")
expect(last_post.user_id).to eq(persona.user_id)
end

it "allows PMing a persona even when no particular bots are enabled" do
SiteSetting.ai_bot_enabled = true
toggle_enabled_bots(bots: [])
18 changes: 18 additions & 0 deletions spec/lib/modules/automation/llm_triage_spec.rb
@@ -110,6 +110,24 @@ def triage(**args)
expect(post.topic.reload.visible).to eq(false)
end

it "can handle spam+silence flags" do
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do
triage(
post: post,
model: "custom:#{llm_model.id}",
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
flag_type: :spam_silence,
automation: nil,
)
end

expect(post.reload).to be_hidden
expect(post.topic.reload.visible).to eq(false)
expect(post.user.silenced?).to eq(true)
end

it "can handle garbled output from LLM" do
DiscourseAi::Completions::Llm.with_prepared_responses(["Bad.\n\nYo"]) do
triage(