
Commit bb6f273

FIX: make AI helper more robust
- If JSON is broken for structured output, fall back to a more forgiving parser
- Gemini 2.5 Flash does not support temperature; support opting out of it (and top_p)
- Evals for the assistant were broken; fix the interface
- Add some missing LLMs to the eval config
- The translator was not mapped correctly to its feature; fix that
- Don't mix XML into the translator prompt
1 parent 92e3615 commit bb6f273

11 files changed: +443, −27 lines

app/models/llm_model.rb

Lines changed: 2 additions & 0 deletions
@@ -65,6 +65,8 @@ def self.provider_params
       google: {
         disable_native_tools: :checkbox,
         enable_thinking: :checkbox,
+        disable_temperature: :checkbox,
+        disable_top_p: :checkbox,
         thinking_tokens: :number,
       },
       azure: {
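
The new checkboxes land in the model's provider_params and are read back with lookup_custom_param, the accessor the Gemini endpoint change below relies on. A minimal sketch of the shape, using a hypothetical model record (the attribute values are illustrative, not from the commit):

# Hypothetical record; only the provider_params shape matters here.
model = LlmModel.new(
  provider: "google",
  provider_params: {
    "disable_temperature" => true, # set via the new admin checkbox
    "disable_top_p" => true,
  },
)

model.lookup_custom_param("disable_temperature") # => true
model.lookup_custom_param("disable_top_p")       # => true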

config/eval-llms.yml

Lines changed: 39 additions & 3 deletions
@@ -1,4 +1,27 @@
 llms:
+  o3:
+    display_name: O3
+    name: o3
+    tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer
+    api_key_env: OPENAI_API_KEY
+    provider: open_ai
+    url: https://api.openai.com/v1/chat/completions
+    max_prompt_tokens: 131072
+    vision_enabled: true
+    provider_params:
+      disable_top_p: true
+      disable_temperature: true
+
+  gpt-41:
+    display_name: GPT-4.1
+    name: gpt-4.1
+    tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer
+    api_key_env: OPENAI_API_KEY
+    provider: open_ai
+    url: https://api.openai.com/v1/chat/completions
+    max_prompt_tokens: 131072
+    vision_enabled: true
+
   gpt-4o:
     display_name: GPT-4o
     name: gpt-4o
@@ -74,12 +97,25 @@ llms:
     max_prompt_tokens: 1000000
     vision_enabled: true
 
-  gemini-2.0-pro-exp:
+  gemini-2.5-flash:
+    display_name: Gemini 2.5 Flash
+    name: gemini-2-5-flash
+    tokenizer: DiscourseAi::Tokenizer::GeminiTokenizer
+    api_key_env: GEMINI_API_KEY
+    provider: google
+    url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash
+    max_prompt_tokens: 1000000
+    vision_enabled: true
+    provider_params:
+      disable_top_p: true
+      disable_temperature: true
+
+  gemini-2.0-pro:
     display_name: Gemini 2.0 pro
-    name: gemini-2-0-pro-exp
+    name: gemini-2-0-pro
     tokenizer: DiscourseAi::Tokenizer::GeminiTokenizer
     api_key_env: GEMINI_API_KEY
     provider: google
-    url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-pro-exp
+    url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-pro
     max_prompt_tokens: 1000000
     vision_enabled: true

config/locales/client.en.yml

Lines changed: 2 additions & 1 deletion
@@ -249,6 +249,7 @@ en:
           markdown_tables: "Generate Markdown table"
           custom_prompt: "Custom prompt"
           image_caption: "Caption images"
+          translator: "Translator"
 
           translation:
             name: "Translation"
@@ -257,7 +258,7 @@ en:
           post_raw_translator: "Post raw translator"
           topic_title_translator: "Topic title translator"
           short_text_translator: "Short text translator"
-
+
           spam:
             name: "Spam"
             description: "Identifies potential spam using the selected LLM and flags it for site moderators to inspect in the review queue"

evals/lib/eval.rb

Lines changed: 1 addition & 6 deletions
@@ -200,12 +200,7 @@ class << user
         user.admin = true
       end
       result =
-        helper.generate_and_send_prompt(
-          name,
-          input,
-          current_user = user,
-          _force_default_locale = false,
-        )
+        helper.generate_and_send_prompt(name, input, current_user = user, force_default_locale: false)
 
       result[:suggestions].first
     end
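
The interface fix matters because force_default_locale is a keyword argument on the helper; the old eval call passed it positionally (as a throwaway local assignment), so it no longer matched the method's arity. A minimal repro, assuming the signature the fixed call site implies:

# Assumed signature, implied by the fixed call above:
def generate_and_send_prompt(name, input, user, force_default_locale: false)
  # ...
end

# Old style: four positional arguments
# => ArgumentError (wrong number of arguments (given 4, expected 3))
#   generate_and_send_prompt(name, input, user, false)

# Fixed: the flag travels as the keyword the method declares
generate_and_send_prompt(name, input, user, force_default_locale: false)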

lib/ai_helper/assistant.rb

Lines changed: 42 additions & 13 deletions
@@ -82,7 +82,7 @@ def attach_user_context(context, user = nil, force_default_locale: false)
       context.user_language = "#{locale_hash["name"]}"
 
       if user
-        timezone = user.user_option.timezone || "UTC"
+        timezone = user&.user_option&.timezone || "UTC"
         current_time = Time.now.in_time_zone(timezone)
 
         temporal_context = {
@@ -126,21 +126,34 @@ def generate_prompt(
       )
       context = attach_user_context(context, user, force_default_locale: force_default_locale)
 
-      helper_response = +""
+      bad_json = false
+      json_summary_schema_key = bot.persona.response_format&.first.to_h
+
+      schema_key = json_summary_schema_key["key"]&.to_sym
+      schema_type = json_summary_schema_key["type"]
+
+      if schema_type == "array"
+        helper_response = []
+      else
+        helper_response = +""
+      end
 
       buffer_blk =
         Proc.new do |partial, _, type|
-          json_summary_schema_key = bot.persona.response_format&.first.to_h
-          helper_response = [] if json_summary_schema_key["type"] == "array"
-          if type == :structured_output
-            helper_chunk = partial.read_buffered_property(json_summary_schema_key["key"]&.to_sym)
+          if type == :structured_output && schema_type
+            bad_json ||= partial.broken?
+            helper_chunk = partial.read_buffered_property(schema_key)
             if !helper_chunk.nil? && !helper_chunk.empty?
-              if json_summary_schema_key["type"] != "array"
-                helper_response = helper_chunk
-              else
+              if !bad_json
                 helper_response << helper_chunk
+              else
+                if schema_type == "string" || schema_type == "array"
+                  helper_response << helper_chunk
+                else
+                  helper_response = helper_chunk
+                end
+                block.call(helper_chunk) if block
               end
-              block.call(helper_chunk) if block
             end
           elsif type.blank?
             # Assume response is a regular completion.
@@ -151,6 +164,17 @@ def generate_prompt(
 
       bot.reply(context, &buffer_blk)
 
+      # handle edge cases where structured output is all over the place
+      if bad_json
+        helper_response = helper_response.join if helper_response.is_a?(Array)
+        helper_response =
+          DiscourseAi::Utils::BestEffortJsonParser.extract_key(
+            helper_response,
+            schema_type,
+            schema_key,
+          )
+        block.call(helper_response) if block
+      end
       helper_response
     end
 
@@ -255,7 +279,7 @@ def generate_image_caption(upload, user)
         Proc.new do |partial, _, type|
           if type == :structured_output
             structured_output = partial
-            json_summary_schema_key = bot.persona.response_format&.first.to_h
+            _json_summary_schema_key = bot.persona.response_format&.first.to_h
           end
         end
 
@@ -287,6 +311,11 @@ def build_bot(helper_mode, user)
       end
 
       def find_ai_helper_model(helper_mode, persona_klass)
+        if helper_mode == IMAGE_CAPTION && @image_caption_llm.is_a?(LlmModel)
+          return @image_caption_llm
+        end
+
+        return @helper_llm if helper_mode != IMAGE_CAPTION && @helper_llm.is_a?(LlmModel)
         self.class.find_ai_helper_model(helper_mode, persona_klass)
       end
 
@@ -299,9 +328,9 @@ def self.find_ai_helper_model(helper_mode, persona_klass)
 
         if !model_id
           if helper_mode == IMAGE_CAPTION
-            model_id = @helper_llm || SiteSetting.ai_helper_image_caption_model&.split(":")&.last
+            model_id = SiteSetting.ai_helper_image_caption_model&.split(":")&.last
           else
-            model_id = @image_caption_llm || SiteSetting.ai_helper_model&.split(":")&.last
+            model_id = SiteSetting.ai_helper_model&.split(":")&.last
           end
         end
 
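Net effect of the generate_prompt changes: partial.broken? flips bad_json the moment the stream stops being valid JSON, and from then on chunks are accumulated and repaired once at the end. A rough sketch of that final repair step, with an invented malformed payload (the repaired value is an assumption about BestEffortJsonParser's behavior; the call signature is taken from the diff above):

# Model wrapped its JSON in chatter and dropped the closing brace:
raw = 'Sure, here is your translation: {"output": "Bonjour le monde'

# schema_type/schema_key come from the persona's response_format;
# for the translator that is "string" and :output.
repaired =
  DiscourseAi::Utils::BestEffortJsonParser.extract_key(raw, "string", :output)
# expected (assumption): "Bonjour le monde"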
lib/completions/endpoints/gemini.rb

Lines changed: 2 additions & 1 deletion
@@ -33,7 +33,8 @@ def normalize_model_params(model_params)
 
        model_params[:topP] = model_params.delete(:top_p) if model_params[:top_p]
 
-       # temperature already supported
+       model_params.delete(:temperature) if llm_model.lookup_custom_param("disable_temperature")
+       model_params.delete(:topP) if llm_model.lookup_custom_param("disable_top_p")
 
        model_params
      end
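
Traced by hand for a model with both opt-outs checked, normalize_model_params now drops the sampling knobs after the usual snake_case-to-camelCase rename (values are illustrative):

model_params = { temperature: 0.2, top_p: 0.95 }

# Gemini expects camelCase, so top_p is renamed first:
model_params[:topP] = model_params.delete(:top_p) if model_params[:top_p]
# => { temperature: 0.2, topP: 0.95 }

# With disable_temperature and disable_top_p set on the model,
# both keys are deleted and the request runs on provider defaults:
model_params.delete(:temperature)
model_params.delete(:topP)
model_params # => {}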

lib/completions/structured_output.rb

Lines changed: 4 additions & 0 deletions
@@ -26,6 +26,10 @@ def <<(raw)
       @partial_json_tracker << raw
     end
 
+    def broken?
+      @partial_json_tracker.broken?
+    end
+
     def read_buffered_property(prop_name)
       # Safeguard: If the model is misbehaving and generating something that's not a JSON,
       # treat response as a normal string.
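
broken? simply exposes the partial-JSON tracker's state; the point is to let a streaming consumer decide, chunk by chunk, whether buffered properties are still trustworthy. The consumer pattern, reduced to its shape (handle is a hypothetical hook; the rest mirrors generate_prompt above):

bad_json = false

on_partial = ->(partial) do      # partial is a StructuredOutput
  bad_json ||= partial.broken?   # sticky: once broken, stays broken
  chunk = partial.read_buffered_property(:output)
  handle(chunk, repair_later: bad_json) if chunk
end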

lib/configuration/feature.rb

Lines changed: 6 additions & 0 deletions
@@ -103,6 +103,12 @@ def ai_helper_features
           DiscourseAi::Configuration::Module::AI_HELPER_ID,
           DiscourseAi::Configuration::Module::AI_HELPER,
         ),
+        new(
+          "translator",
+          "ai_helper_translator_persona",
+          DiscourseAi::Configuration::Module::AI_HELPER_ID,
+          DiscourseAi::Configuration::Module::AI_HELPER,
+        ),
         new(
           "custom_prompt",
           "ai_helper_custom_prompt_persona",

lib/personas/translator.rb

Lines changed: 4 additions & 3 deletions
@@ -19,11 +19,12 @@ def system_prompt
 
         Format your response as a JSON object with a single key named "output", which has the translation as the value.
         Your output should be in the following format:
-          <output>
-            {"output": "xx"}
-          </output>
+
+          {"output": "xx"}
 
         Where "xx" is replaced by the translation.
+
+        reply with valid JSON only
       PROMPT
     end
