This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit 381a271

FEATURE: o3-mini support (#1105)
1. Adds o3-mini presets
2. Adds support for reasoning effort
3. Properly uses "developer" messages for reasoning models
1 parent 8c22540 · commit 381a271
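
Taken together, a completion request against a reasoning model now carries a "developer" message and an optional reasoning_effort field. A sketch of the resulting payload (illustrative, not lifted from the code; exact contents depend on model_params and streaming settings):

  # Assumes a model named "o3-mini" with reasoning_effort set to "high".
  payload = {
    model: "o3-mini",
    reasoning_effort: "high", # added by prepare_payload for low/medium/high
    messages: [
      # reasoning models now receive "developer" instead of "user"/"system"
      { role: "developer", content: "You are a helpful assistant" },
      { role: "user", content: "Summarise this topic" },
    ],
  }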

File tree: 6 files changed, +66 -23 lines

app/models/llm_model.rb

Lines changed: 5 additions & 0 deletions
@@ -34,6 +34,11 @@ def self.provider_params
         organization: :text,
         disable_native_tools: :checkbox,
         disable_streaming: :checkbox,
+        reasoning_effort: {
+          type: :enum,
+          values: %w[default low medium high],
+          default: "default",
+        },
       },
       mistral: {
         disable_native_tools: :checkbox,
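
Once saved, the new param is read back through the same custom-param lookup the endpoint uses. A minimal sketch (the find_by call is illustrative and not part of this commit):

  # Assumes an LlmModel record named "o3-mini" exists.
  llm = LlmModel.find_by(name: "o3-mini")
  llm.lookup_custom_param("reasoning_effort")
  # => "default", "low", "medium", or "high"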

assets/javascripts/discourse/components/ai-llm-editor-form.gjs

Lines changed: 35 additions & 12 deletions
@@ -114,10 +114,26 @@ export default class AiLlmEditorForm extends Component {

   @computed("args.model.provider")
   get metaProviderParams() {
-    return (
+    const params =
       this.args.llms.resultSetMeta.provider_params[this.args.model.provider] ||
-      {}
-    );
+      {};
+
+    return Object.entries(params).map(([field, value]) => {
+      if (typeof value === "string") {
+        return { field, type: value };
+      } else if (typeof value === "object") {
+        if (value.values) {
+          value = { ...value };
+          value.values = value.values.map((v) => {
+            return { id: v, name: v };
+          });
+        }
+        this.args.model.provider_params[field] =
+          this.args.model.provider_params[field] || value.default;
+        return { field, ...value };
+      }
+      return { field, type: "text" }; // fallback
+    });
   }

   @action

@@ -275,24 +291,31 @@
         />
       </div>
     </div>
-    {{#each-in this.metaProviderParams as |field type|}}
-      <div class="control-group ai-llm-editor-provider-param__{{type}}">
+    {{#each this.metaProviderParams as |param|}}
+      <div
+        class="control-group ai-llm-editor-provider-param__{{param.type}}"
+      >
         <label>{{i18n
-          (concat "discourse_ai.llms.provider_fields." field)
+          (concat "discourse_ai.llms.provider_fields." param.field)
         }}</label>
-        {{#if (eq type "checkbox")}}
+        {{#if (eq param.type "enum")}}
+          <ComboBox
+            @value={{mut (get @model.provider_params param.field)}}
+            @content={{param.values}}
+          />
+        {{else if (eq param.type "checkbox")}}
          <Input
-            @type={{type}}
-            @checked={{mut (get @model.provider_params field)}}
+            @type={{param.type}}
+            @checked={{mut (get @model.provider_params param.field)}}
          />
        {{else}}
          <Input
-            @type={{type}}
-            @value={{mut (get @model.provider_params field)}}
+            @type={{param.type}}
+            @value={{mut (get @model.provider_params param.field)}}
          />
        {{/if}}
      </div>
-    {{/each-in}}
+    {{/each}}
     <div class="control-group">
       <label>{{i18n "discourse_ai.llms.tokenizer"}}</label>
       <ComboBox
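
For clarity, the same normalization expressed in Ruby (an illustration only, not part of the commit; it mirrors the .gjs mapping above):

  params = {
    disable_streaming: :checkbox,
    reasoning_effort: { type: :enum, values: %w[default low medium high], default: "default" },
  }

  normalized =
    params.map do |field, value|
      if value.is_a?(Symbol)
        { field: field, type: value }
      else
        value = value.merge(values: value[:values].map { |v| { id: v, name: v } }) if value[:values]
        { field: field, **value }
      end
    end
  # => [{ field: :disable_streaming, type: :checkbox },
  #     { field: :reasoning_effort, type: :enum,
  #       values: [{ id: "default", name: "default" }, ...], default: "default" }]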

config/locales/client.en.yml

Lines changed: 4 additions & 1 deletion
@@ -382,7 +382,9 @@ en:
       open_ai-gpt-4o: "High intelligence model for complex, multi-step tasks"
       open_ai-gpt-4o-mini: "Affordable and fast small model for lightweight tasks"
       open_ai-o1-mini: "Cost-efficient reasoning model"
-      open_ai-o1-preview: "Open AI's most capable reasoning model"
+      open_ai-o1-preview: "Open AI's most capable reasoning model (preview)"
+      open_ai-o1: "Open AI's most capable reasoning model"
+      open_ai-o3-mini: "Advanced cost-efficient reasoning model"
       samba_nova-Meta-Llama-3-1-8B-Instruct: "Efficient lightweight multilingual model"
       samba_nova-Meta-Llama-3-1-70B-Instruct: "Powerful multipurpose model"
       mistral-mistral-large-latest: "Mistral's most powerful model"

@@ -439,6 +441,7 @@ en:
       provider_order: "Provider order (comma delimited list)"
       provider_quantizations: "Order of provider quantizations (comma delimited list eg: fp16,fp8)"
       disable_streaming: "Disable streaming completions (convert streaming to non streaming requests)"
+      reasoning_effort: "Reasoning effort (only applicable to reasoning models)"

     related_topics:
       title: "Related topics"

lib/completions/dialects/chat_gpt.rb

Lines changed: 8 additions & 7 deletions
@@ -40,11 +40,6 @@ def max_prompt_tokens
         llm_model.max_prompt_tokens - buffer
       end

-      # no support for streaming or tools or system messages
-      def is_gpt_o?
-        llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
-      end
-
       def disable_native_tools?
         return @disable_native_tools if defined?(@disable_native_tools)
         @disable_native_tools = !!llm_model.lookup_custom_param("disable_native_tools")

@@ -60,14 +55,20 @@ def tools_dialect
         end
       end

+      # developer messages are preferred on reasoning models
+      def supports_developer_messages?
+        llm_model.provider == "open_ai" &&
+          (llm_model.name.start_with?("o1") || llm_model.name.start_with?("o3"))
+      end
+
       def system_msg(msg)
         content = msg[:content]
         if disable_native_tools? && tools_dialect.instructions.present?
           content = content + "\n\n" + tools_dialect.instructions
         end

-        if is_gpt_o?
-          { role: "user", content: content }
+        if supports_developer_messages?
+          { role: "developer", content: content }
         else
           { role: "system", content: content }
         end
lib/completions/endpoints/open_ai.rb

Lines changed: 10 additions & 1 deletion
@@ -43,7 +43,14 @@ def perform_completion!(
       private

       def disable_streaming?
-        @disable_streaming = llm_model.lookup_custom_param("disable_streaming")
+        @disable_streaming ||= llm_model.lookup_custom_param("disable_streaming")
+      end
+
+      def reasoning_effort
+        return @reasoning_effort if defined?(@reasoning_effort)
+        @reasoning_effort = llm_model.lookup_custom_param("reasoning_effort")
+        @reasoning_effort = nil if !%w[low medium high].include?(@reasoning_effort)
+        @reasoning_effort
       end

       def model_uri

@@ -60,6 +67,8 @@ def model_uri
       def prepare_payload(prompt, model_params, dialect)
         payload = default_options.merge(model_params).merge(messages: prompt)

+        payload[:reasoning_effort] = reasoning_effort if reasoning_effort
+
         if @streaming_mode
           payload[:stream] = true

lib/completions/llm.rb

Lines changed: 4 additions & 2 deletions
@@ -62,8 +62,10 @@ def presets
         {
           id: "open_ai",
           models: [
-            { name: "o1-preview", tokens: 131_072, display_name: "o1" },
-            { name: "o1-mini", tokens: 131_072, display_name: "o1 mini" },
+            { name: "o3-mini", tokens: 200_000, display_name: "o3 Mini" },
+            { name: "o1", tokens: 200_000, display_name: "o1" },
+            { name: "o1-preview", tokens: 131_072, display_name: "o1 preview" },
+            { name: "o1-mini", tokens: 131_072, display_name: "o1 Mini" },
             { name: "gpt-4o", tokens: 131_072, display_name: "GPT-4 Omni" },
             { name: "gpt-4o-mini", tokens: 131_072, display_name: "GPT-4 Omni Mini" },
             { name: "gpt-4-turbo", tokens: 131_072, display_name: "GPT-4 Turbo" },
