
Commit 059d3b6

FEATURE: better logging for automation reports (#853)
A new feature_context jsonb column was added to ai_api_audit_logs. This allows us to store rich JSON context on any LLM request made; the new field now records the automation id and name. Additionally, llm_triage can now specify a maximum number of post tokens, which means you can limit the cost of LLM triage by scanning only the first N tokens of a post.
1 parent eae7716 commit 059d3b6
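
Because feature_context is stored as jsonb, the new context can be filtered with ordinary Postgres JSON operators. A minimal sketch of such a query (only the AiApiAuditLog model and the feature_context column come from this commit; the sample values and the calling code are illustrative):

```ruby
# Find all LLM audit logs produced by a given automation.
# ->> extracts the stored automation_id from the jsonb column as text,
# so the bound value is stringified for comparison.
logs =
  AiApiAuditLog.where(
    "feature_context->>'automation_id' = ?",
    automation_id.to_s,
  )

logs.pluck(:feature_name, :feature_context)
# => [["llm_triage", { "automation_id" => 42, "automation_name" => "Spam triage" }], ...]
```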

14 files changed: +138 −15 lines changed

app/models/ai_api_audit_log.rb

Lines changed: 1 addition & 1 deletion
@@ -33,4 +33,4 @@ module Provider
 #  post_id          :integer
 #  feature_name     :string(255)
 #  language_model   :string(255)
-#
+#  feature_context  :jsonb
config/locales/client.en.yml

Lines changed: 3 additions & 0 deletions
@@ -81,6 +81,9 @@ en:
             system_prompt:
               label: "System Prompt"
               description: "The prompt that will be used to triage, be sure for it to reply with a single word you can use to trigger the action"
+            max_post_tokens:
+              label: "Max Post Tokens"
+              description: "The maximum number of tokens to scan using LLM triage"
             search_for_text:
               label: "Search for text"
               description: "If the following text appears in the llm reply, apply this actions"
db/migrate/…_add_feature_context_to_ai_api_log.rb

Lines changed: 7 additions & 0 deletions

@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+#
+class AddFeatureContextToAiApiLog < ActiveRecord::Migration[7.1]
+  def change
+    add_column :ai_api_audit_logs, :feature_context, :jsonb
+  end
+end
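
The commit adds the column without an index. If audit logs ever needed to be filtered by context at scale, a follow-up migration could add a GIN index; this is a hypothetical extension, not part of the commit:

```ruby
# frozen_string_literal: true
# Hypothetical follow-up migration, shown for illustration only.
class AddIndexToAiApiAuditLogsFeatureContext < ActiveRecord::Migration[7.1]
  def change
    # GIN indexes make jsonb containment and key-lookup queries efficient.
    add_index :ai_api_audit_logs, :feature_context, using: :gin
  end
end
```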

discourse_automation/llm_report.rb

Lines changed: 1 addition & 0 deletions
@@ -93,6 +93,7 @@ module DiscourseAutomation::LlmReport
         temperature: temperature,
         top_p: top_p,
         suppress_notifications: suppress_notifications,
+        automation: self.automation,
       )
     rescue => e
       Discourse.warn_exception e, message: "Error running LLM report!"
discourse_automation/llm_triage.rb

Lines changed: 5 additions & 0 deletions
@@ -11,6 +11,7 @@
 
   field :system_prompt, component: :message, required: false
   field :search_for_text, component: :text, required: true
+  field :max_post_tokens, component: :text
   field :model,
         component: :choices,
         required: true,
@@ -49,6 +50,9 @@
     hide_topic = fields.dig("hide_topic", "value")
     flag_post = fields.dig("flag_post", "value")
     flag_type = fields.dig("flag_type", "value")
+    max_post_tokens = fields.dig("max_post_tokens", "value").to_i
+
+    max_post_tokens = nil if max_post_tokens <= 0
 
     begin
       RateLimiter.new(
@@ -77,6 +81,7 @@
       hide_topic: hide_topic,
       flag_post: flag_post,
      flag_type: flag_type.to_s.to_sym,
+      max_post_tokens: max_post_tokens,
       automation: self.automation,
     )
   rescue => e
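
The to_i conversion plus the nil-if-non-positive guard means a blank, missing, or non-numeric field value disables truncation entirely rather than truncating to zero tokens. A quick, runnable sketch of that normalization (input values are illustrative):

```ruby
# How the automation field value is normalized before use:
["", nil, "abc", "0", "-5", "500"].map do |raw|
  n = raw.to_i       # blank or non-numeric values become 0
  n = nil if n <= 0  # zero or negative means "no limit"
  n
end
# => [nil, nil, nil, nil, nil, 500]
```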

lib/automation/llm_triage.rb

Lines changed: 14 additions & 4 deletions
@@ -15,20 +15,26 @@ def self.handle(
   hide_topic: nil,
   flag_post: nil,
   flag_type: nil,
-  automation: nil
+  automation: nil,
+  max_post_tokens: nil
 )
   if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
        flag_post.blank?
     raise ArgumentError, "llm_triage: no action specified!"
   end
 
+  llm = DiscourseAi::Completions::Llm.proxy(model)
+
   s_prompt = system_prompt.to_s.sub("%%POST%%", "") # Backwards-compat. We no longer sub this.
   prompt = DiscourseAi::Completions::Prompt.new(s_prompt)
-  prompt.push(type: :user, content: "title: #{post.topic.title}\n#{post.raw}")
 
-  result = nil
+  content = "title: #{post.topic.title}\n#{post.raw}"
 
-  llm = DiscourseAi::Completions::Llm.proxy(model)
+  content = llm.tokenizer.truncate(content, max_post_tokens) if max_post_tokens.present?
+
+  prompt.push(type: :user, content: content)
+
+  result = nil
 
   result =
     llm.generate(
@@ -37,6 +43,10 @@ def self.handle(
       max_tokens: 700, # ~500 words
       user: Discourse.system_user,
       feature_name: "llm_triage",
+      feature_context: {
+        automation_id: automation&.id,
+        automation_name: automation&.name,
+      },
     )&.strip
 
   if result.present? && result.downcase.include?(search_for_text.downcase)
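
With this change the post body is truncated by the model's own tokenizer before it is pushed into the prompt, so the limit is measured in model tokens rather than characters. A hedged sketch of the effect, reusing the handler's own variables; it assumes the tokenizer also exposes a size helper, as Discourse tokenizers generally do:

```ruby
llm = DiscourseAi::Completions::Llm.proxy(model)

# A long post is cut down before it ever reaches the LLM,
# capping the prompt cost of a triage run.
content = "title: #{post.topic.title}\n#{post.raw}"
truncated = llm.tokenizer.truncate(content, 500) # keep only the first 500 tokens

llm.tokenizer.size(truncated) # => at most 500
```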

lib/automation/report_runner.rb

Lines changed: 7 additions & 1 deletion
@@ -53,7 +53,8 @@ def initialize(
     exclude_tags: nil,
     top_p: 0.1,
     temperature: 0.2,
-    suppress_notifications: false
+    suppress_notifications: false,
+    automation: nil
   )
     @sender = User.find_by(username: sender_username)
     @receivers = User.where(username: receivers)
@@ -90,6 +91,7 @@ def initialize(
     if !@topic_id && !@receivers.present? && !@email_receivers.present?
       raise ArgumentError, "Must specify topic_id or receivers"
     end
+    @automation = automation
   end
 
   def run!
@@ -153,6 +155,10 @@ def run!
         top_p: @top_p,
         user: Discourse.system_user,
         feature_name: "ai_report",
+        feature_context: {
+          automation_id: @automation&.id,
+          automation_name: @automation&.name,
+        },
       ) do |response|
         print response if Rails.env.development? && @debug_mode
         result << response

lib/completions/endpoints/base.rb

Lines changed: 9 additions & 1 deletion
@@ -56,7 +56,14 @@ def xml_tags_to_strip(dialect)
     []
   end
 
-  def perform_completion!(dialect, user, model_params = {}, feature_name: nil, &blk)
+  def perform_completion!(
+    dialect,
+    user,
+    model_params = {},
+    feature_name: nil,
+    feature_context: nil,
+    &blk
+  )
     allow_tools = dialect.prompt.has_tools?
     model_params = normalize_model_params(model_params)
     orig_blk = blk
@@ -111,6 +118,7 @@ def perform_completion!(dialect, user, model_params = {}, feature_name: nil, &blk)
       post_id: dialect.prompt.post_id,
       feature_name: feature_name,
       language_model: llm_model.name,
+      feature_context: feature_context.present? ? feature_context.as_json : nil,
     )
 
     if !@streaming_mode
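
Calling as_json before persisting normalizes whatever the caller passed (a symbol-keyed hash, for instance) into plain JSON-compatible data for the jsonb column. For example:

```ruby
# ActiveSupport's Hash#as_json stringifies symbol keys,
# matching what the jsonb column will return on read.
{ automation_id: 42, automation_name: "Spam triage" }.as_json
# => { "automation_id" => 42, "automation_name" => "Spam triage" }
```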

lib/completions/endpoints/canned_response.rb

Lines changed: 7 additions & 1 deletion
@@ -23,7 +23,13 @@ def prompt_messages
     dialect.prompt.messages
   end
 
-  def perform_completion!(dialect, _user, _model_params, feature_name: nil)
+  def perform_completion!(
+    dialect,
+    _user,
+    _model_params,
+    feature_name: nil,
+    feature_context: nil
+  )
     @dialect = dialect
     response = responses[completions]
     if response.nil?

lib/completions/endpoints/fake.rb

Lines changed: 7 additions & 1 deletion
@@ -100,7 +100,13 @@ def self.last_call=(params)
     @last_call = params
   end
 
-  def perform_completion!(dialect, user, model_params = {}, feature_name: nil)
+  def perform_completion!(
+    dialect,
+    user,
+    model_params = {},
+    feature_name: nil,
+    feature_context: nil
+  )
     self.class.last_call = { dialect: dialect, user: user, model_params: model_params }
 
     content = self.class.fake_content
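
Because the canned and fake test endpoints now accept feature_context, specs can exercise the full completion path and assert on the persisted audit row. A hedged sketch of such an assertion (the spec setup is illustrative and not part of this commit; the model and column names are):

```ruby
# Illustrative RSpec-style assertion after running an llm_triage automation.
log = AiApiAuditLog.order(:id).last
expect(log.feature_name).to eq("llm_triage")
# jsonb round-trips with string keys:
expect(log.feature_context).to eq(
  "automation_id" => automation.id,
  "automation_name" => automation.name,
)
```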
