Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions app/models/llm_model.rb
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@ def self.provider_params
organization: :text,
disable_native_tools: :checkbox,
disable_streaming: :checkbox,
reasoning_effort: {
type: :enum,
values: %w[default low medium high],
default: "default",
},
},
mistral: {
disable_native_tools: :checkbox,
Expand Down
47 changes: 35 additions & 12 deletions assets/javascripts/discourse/components/ai-llm-editor-form.gjs
Original file line number Diff line number Diff line change
Expand Up @@ -114,10 +114,26 @@ export default class AiLlmEditorForm extends Component {

@computed("args.model.provider")
get metaProviderParams() {
return (
const params =
this.args.llms.resultSetMeta.provider_params[this.args.model.provider] ||
{}
);
{};

return Object.entries(params).map(([field, value]) => {
if (typeof value === "string") {
return { field, type: value };
} else if (typeof value === "object") {
if (value.values) {
value = { ...value };
value.values = value.values.map((v) => {
return { id: v, name: v };
});
}
this.args.model.provider_params[field] =
this.args.model.provider_params[field] || value.default;
return { field, ...value };
}
return { field, type: "text" }; // fallback
});
}

@action
Expand Down Expand Up @@ -275,24 +291,31 @@ export default class AiLlmEditorForm extends Component {
/>
</div>
</div>
{{#each-in this.metaProviderParams as |field type|}}
<div class="control-group ai-llm-editor-provider-param__{{type}}">
{{#each this.metaProviderParams as |param|}}
<div
class="control-group ai-llm-editor-provider-param__{{param.type}}"
>
<label>{{i18n
(concat "discourse_ai.llms.provider_fields." field)
(concat "discourse_ai.llms.provider_fields." param.field)
}}</label>
{{#if (eq type "checkbox")}}
{{#if (eq param.type "enum")}}
<ComboBox
@value={{mut (get @model.provider_params param.field)}}
@content={{param.values}}
/>
{{else if (eq param.type "checkbox")}}
<Input
@type={{type}}
@checked={{mut (get @model.provider_params field)}}
@type={{param.type}}
@checked={{mut (get @model.provider_params param.field)}}
/>
{{else}}
<Input
@type={{type}}
@value={{mut (get @model.provider_params field)}}
@type={{param.type}}
@value={{mut (get @model.provider_params param.field)}}
/>
{{/if}}
</div>
{{/each-in}}
{{/each}}
<div class="control-group">
<label>{{i18n "discourse_ai.llms.tokenizer"}}</label>
<ComboBox
Expand Down
5 changes: 4 additions & 1 deletion config/locales/client.en.yml
Original file line number Diff line number Diff line change
Expand Up @@ -382,7 +382,9 @@ en:
open_ai-gpt-4o: "High intelligence model for complex, multi-step tasks"
open_ai-gpt-4o-mini: "Affordable and fast small model for lightweight tasks"
open_ai-o1-mini: "Cost-efficient reasoning model"
open_ai-o1-preview: "Open AI's most capable reasoning model"
open_ai-o1-preview: "Open AI's most capable reasoning model (preview)"
open_ai-o1: "Open AI's most capable reasoning model"
open_ai-o3-mini: "Advanced cost-efficient reasoning model"
samba_nova-Meta-Llama-3-1-8B-Instruct: "Efficient lightweight multilingual model"
samba_nova-Meta-Llama-3-1-70B-Instruct: "Powerful multipurpose model"
mistral-mistral-large-latest: "Mistral's most powerful model"
Expand Down Expand Up @@ -439,6 +441,7 @@ en:
provider_order: "Provider order (comma delimited list)"
provider_quantizations: "Order of provider quantizations (comma delimited list eg: fp16,fp8)"
disable_streaming: "Disable streaming completions (convert streaming to non streaming requests)"
reasoning_effort: "Reasoning effort (only applicable to reasoning models)"

related_topics:
title: "Related topics"
Expand Down
15 changes: 8 additions & 7 deletions lib/completions/dialects/chat_gpt.rb
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,6 @@ def max_prompt_tokens
llm_model.max_prompt_tokens - buffer
end

# no support for streaming or tools or system messages
def is_gpt_o?
llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
end

def disable_native_tools?
return @disable_native_tools if defined?(@disable_native_tools)
!!@disable_native_tools = llm_model.lookup_custom_param("disable_native_tools")
Expand All @@ -60,14 +55,20 @@ def tools_dialect
end
end

# Reasoning models (the o1/o3 families on OpenAI) accept the "developer"
# role in place of "system" for instruction messages.
def supports_developer_messages?
  return false unless llm_model.provider == "open_ai"

  llm_model.name.start_with?("o1", "o3")
end

def system_msg(msg)
content = msg[:content]
if disable_native_tools? && tools_dialect.instructions.present?
content = content + "\n\n" + tools_dialect.instructions
end

if is_gpt_o?
{ role: "user", content: content }
if supports_developer_messages?
{ role: "developer", content: content }
else
{ role: "system", content: content }
end
Expand Down
11 changes: 10 additions & 1 deletion lib/completions/endpoints/open_ai.rb
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,14 @@ def perform_completion!(
private

# Whether streaming completions are disabled for this model.
#
# Uses a defined? guard (consistent with reasoning_effort and
# disable_native_tools?) because `||=` never caches a falsy lookup
# result — with `||=`, a stored false/nil would re-invoke
# lookup_custom_param on every call.
def disable_streaming?
  return @disable_streaming if defined?(@disable_streaming)

  @disable_streaming = llm_model.lookup_custom_param("disable_streaming")
end

# Validated "reasoning_effort" custom param for o-series reasoning models.
# Returns "low"/"medium"/"high", or nil when the param is unset or holds
# any other value (e.g. "default"). Memoized with a defined? guard so a
# nil result is cached too.
def reasoning_effort
  return @reasoning_effort if defined?(@reasoning_effort)

  effort = llm_model.lookup_custom_param("reasoning_effort")
  @reasoning_effort = %w[low medium high].include?(effort) ? effort : nil
end

def model_uri
Expand All @@ -60,6 +67,8 @@ def model_uri
def prepare_payload(prompt, model_params, dialect)
payload = default_options.merge(model_params).merge(messages: prompt)

payload[:reasoning_effort] = reasoning_effort if reasoning_effort

if @streaming_mode
payload[:stream] = true

Expand Down
6 changes: 4 additions & 2 deletions lib/completions/llm.rb
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,10 @@ def presets
{
id: "open_ai",
models: [
{ name: "o1-preview", tokens: 131_072, display_name: "o1" },
{ name: "o1-mini", tokens: 131_072, display_name: "o1 mini" },
{ name: "o3-mini", tokens: 200_000, display_name: "o3 Mini" },
{ name: "o1", tokens: 200_000, display_name: "o1" },
{ name: "o1-preview", tokens: 131_072, display_name: "o1 preview" },
{ name: "o1-mini", tokens: 131_072, display_name: "o1 Mini" },
{ name: "gpt-4o", tokens: 131_072, display_name: "GPT-4 Omni" },
{ name: "gpt-4o-mini", tokens: 131_072, display_name: "GPT-4 Omni Mini" },
{ name: "gpt-4-turbo", tokens: 131_072, display_name: "GPT-4 Turbo" },
Expand Down