Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit b675c4c

Browse files
committed
DEV: Remove custom prefix in specs
1 parent 0fadf1d commit b675c4c

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

49 files changed

+106
-135
lines changed

lib/ai_helper/chat_thread_titler.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ def call_llm(thread_content)
3030
messages: [{ type: :user, content: chat, id: "User" }],
3131
)
3232

33-
DiscourseAi::Completions::Llm.proxy(SiteSetting.ai_helper_model).generate(
33+
DiscourseAi::Completions::Llm.proxy(SiteSetting.ai_default_llm_model).generate(
3434
prompt,
3535
user: Discourse.system_user,
3636
stop_sequences: ["</input>"],

spec/configuration/feature_spec.rb

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@ def allow_configuring_setting(&block)
3838

3939
it "returns the configured llm model" do
4040
SiteSetting.ai_summarization_persona = ai_persona.id
41-
allow_configuring_setting { SiteSetting.ai_summarization_model = "custom:#{llm_model.id}" }
4241
expect(ai_feature.llm_models).to eq([llm_model])
4342
end
4443
end
@@ -55,8 +54,6 @@ def allow_configuring_setting(&block)
5554

5655
it "returns the persona's default llm when no specific helper model is set" do
5756
SiteSetting.ai_helper_proofreader_persona = ai_persona.id
58-
SiteSetting.ai_helper_model = ""
59-
6057
expect(ai_feature.llm_models).to eq([llm_model])
6158
end
6259
end
@@ -75,11 +72,7 @@ def allow_configuring_setting(&block)
7572

7673
it "uses translation model when configured" do
7774
SiteSetting.ai_translation_locale_detector_persona = ai_persona.id
78-
ai_persona.update!(default_llm_id: nil)
79-
allow_configuring_setting do
80-
SiteSetting.ai_translation_model = "custom:#{translation_model.id}"
81-
end
82-
75+
ai_persona.update!(default_llm_id: translation_model.id)
8376
expect(ai_feature.llm_models).to eq([translation_model])
8477
end
8578
end

spec/configuration/llm_enumerator_spec.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151
component: "text",
5252
name: "model",
5353
metadata: {
54-
value: "custom:#{fake_model.id}",
54+
value: fake_model.id,
5555
},
5656
target: "script",
5757
)

spec/jobs/scheduled/topics_locale_detection_backfill_spec.rb

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,8 @@
55
subject(:job) { described_class.new }
66

77
before do
8+
assign_fake_provider_to(:ai_default_llm_model)
89
SiteSetting.discourse_ai_enabled = true
9-
Fabricate(:fake_model).tap do |fake_llm|
10-
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
11-
end
1210
SiteSetting.ai_translation_enabled = true
1311
SiteSetting.ai_translation_backfill_hourly_rate = 100
1412
SiteSetting.content_localization_supported_locales = "en"

spec/lib/completions/endpoints/anthropic_spec.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
55
let(:url) { "https://api.anthropic.com/v1/messages" }
66
fab!(:model) { Fabricate(:anthropic_model, name: "claude-3-opus", vision_enabled: true) }
7-
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{model.id}") }
7+
let(:llm) { DiscourseAi::Completions::Llm.proxy(model) }
88
let(:image100x100) { plugin_file_from_fixtures("100x100.jpg") }
99
let(:upload100x100) do
1010
UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
@@ -374,7 +374,7 @@
374374
model.provider_params["reasoning_tokens"] = 10_000
375375
model.save!
376376

377-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
377+
proxy = DiscourseAi::Completions::Llm.proxy(model)
378378
result = proxy.generate(prompt, user: Discourse.system_user)
379379
expect(result).to eq("Hello!")
380380

@@ -432,7 +432,7 @@
432432
},
433433
).to_return(status: 200, body: body)
434434

435-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
435+
proxy = DiscourseAi::Completions::Llm.proxy(model)
436436
result = proxy.generate(prompt, user: Discourse.system_user)
437437
expect(result).to eq("Hello!")
438438

spec/lib/completions/endpoints/aws_bedrock_spec.rb

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def encode_message(message)
4747
model.provider_params["disable_native_tools"] = true
4848
model.save!
4949

50-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
50+
proxy = DiscourseAi::Completions::Llm.proxy(model)
5151

5252
incomplete_tool_call = <<~XML.strip
5353
<thinking>I should be ignored</thinking>
@@ -122,7 +122,7 @@ def encode_message(message)
122122
end
123123

124124
it "supports streaming function calls" do
125-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
125+
proxy = DiscourseAi::Completions::Llm.proxy(model)
126126

127127
request = nil
128128

@@ -293,7 +293,7 @@ def encode_message(message)
293293

294294
describe "Claude 3 support" do
295295
it "supports regular completions" do
296-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
296+
proxy = DiscourseAi::Completions::Llm.proxy(model)
297297

298298
request = nil
299299

@@ -340,7 +340,7 @@ def encode_message(message)
340340
model.provider_params["reasoning_tokens"] = 10_000
341341
model.save!
342342

343-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
343+
proxy = DiscourseAi::Completions::Llm.proxy(model)
344344

345345
request = nil
346346

@@ -387,7 +387,7 @@ def encode_message(message)
387387
end
388388

389389
it "supports claude 3 streaming" do
390-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
390+
proxy = DiscourseAi::Completions::Llm.proxy(model)
391391

392392
request = nil
393393

@@ -448,7 +448,7 @@ def encode_message(message)
448448
},
449449
)
450450

451-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
451+
proxy = DiscourseAi::Completions::Llm.proxy(model)
452452
request = nil
453453

454454
content = {
@@ -487,7 +487,7 @@ def encode_message(message)
487487

488488
describe "disabled tool use" do
489489
it "handles tool_choice: :none by adding a prefill message instead of using tool_choice param" do
490-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
490+
proxy = DiscourseAi::Completions::Llm.proxy(model)
491491
request = nil
492492

493493
# Create a prompt with tool_choice: :none
@@ -549,7 +549,7 @@ def encode_message(message)
549549

550550
describe "forced tool use" do
551551
it "can properly force tool use" do
552-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
552+
proxy = DiscourseAi::Completions::Llm.proxy(model)
553553
request = nil
554554

555555
tools = [
@@ -640,7 +640,7 @@ def encode_message(message)
640640
{ type: "message_delta", delta: { usage: { output_tokens: 25 } } },
641641
].map { |message| encode_message(message) }
642642

643-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
643+
proxy = DiscourseAi::Completions::Llm.proxy(model)
644644
request = nil
645645
bedrock_mock.with_chunk_array_support do
646646
stub_request(
@@ -718,7 +718,7 @@ def encode_message(message)
718718
{ type: "message_delta", delta: { usage: { output_tokens: 25 } } },
719719
].map { |message| encode_message(message) }
720720

721-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
721+
proxy = DiscourseAi::Completions::Llm.proxy(model)
722722
request = nil
723723
bedrock_mock.with_chunk_array_support do
724724
stub_request(

spec/lib/completions/endpoints/cohere_spec.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
RSpec.describe DiscourseAi::Completions::Endpoints::Cohere do
55
fab!(:cohere_model)
6-
let(:llm) { DiscourseAi::Completions::Llm.proxy("custom:#{cohere_model.id}") }
6+
let(:llm) { DiscourseAi::Completions::Llm.proxy(cohere_model) }
77
fab!(:user)
88

99
let(:prompt) do

spec/lib/completions/endpoints/gemini_spec.rb

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ def tool_response
160160

161161
req_body = nil
162162

163-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
163+
llm = DiscourseAi::Completions::Llm.proxy(model)
164164
url = "#{model.url}:generateContent?key=123"
165165

166166
stub_request(:post, url).with(
@@ -186,7 +186,7 @@ def tool_response
186186

187187
req_body = nil
188188

189-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
189+
llm = DiscourseAi::Completions::Llm.proxy(model)
190190
url = "#{model.url}:generateContent?key=123"
191191

192192
stub_request(:post, url).with(
@@ -220,7 +220,7 @@ def tool_response
220220

221221
req_body = nil
222222

223-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
223+
llm = DiscourseAi::Completions::Llm.proxy(model)
224224
url = "#{model.url}:generateContent?key=123"
225225

226226
stub_request(:post, url).with(
@@ -246,7 +246,7 @@ def tool_response
246246

247247
req_body = nil
248248

249-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
249+
llm = DiscourseAi::Completions::Llm.proxy(model)
250250
url = "#{model.url}:generateContent?key=123"
251251

252252
stub_request(:post, url).with(
@@ -274,7 +274,7 @@ def tool_response
274274

275275
req_body = nil
276276

277-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
277+
llm = DiscourseAi::Completions::Llm.proxy(model)
278278
url = "#{model.url}:generateContent?key=123"
279279

280280
stub_request(:post, url).with(
@@ -297,7 +297,7 @@ def tool_response
297297
it "properly encodes tool calls" do
298298
prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool])
299299

300-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
300+
llm = DiscourseAi::Completions::Llm.proxy(model)
301301
url = "#{model.url}:generateContent?key=123"
302302

303303
response_json = { "functionCall" => { name: "echo", args: { text: "<S>ydney" } } }
@@ -332,7 +332,7 @@ def tool_response
332332

333333
req_body = nil
334334

335-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
335+
llm = DiscourseAi::Completions::Llm.proxy(model)
336336
url = "#{model.url}:generateContent?key=123"
337337

338338
stub_request(:post, url).with(
@@ -410,7 +410,7 @@ def tool_response
410410

411411
payload = rows.map { |r| "data: #{r.to_json}\n\n" }.join
412412

413-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
413+
llm = DiscourseAi::Completions::Llm.proxy(model)
414414
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
415415

416416
prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool])
@@ -450,7 +450,7 @@ def tool_response
450450
451451
TEXT
452452

453-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
453+
llm = DiscourseAi::Completions::Llm.proxy(model)
454454
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
455455

456456
output = []
@@ -478,7 +478,7 @@ def tool_response
478478

479479
split = data.split("|")
480480

481-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
481+
llm = DiscourseAi::Completions::Llm.proxy(model)
482482
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
483483

484484
output = []
@@ -497,7 +497,7 @@ def tool_response
497497

498498
req_body = nil
499499

500-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
500+
llm = DiscourseAi::Completions::Llm.proxy(model)
501501
url = "#{model.url}:generateContent?key=123"
502502

503503
stub_request(:post, url).with(
@@ -525,7 +525,7 @@ def tool_response
525525

526526
req_body = nil
527527

528-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
528+
llm = DiscourseAi::Completions::Llm.proxy(model)
529529
url = "#{model.url}:generateContent?key=123"
530530

531531
stub_request(:post, url).with(
@@ -600,7 +600,7 @@ def tool_response
600600

601601
req_body = nil
602602

603-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
603+
llm = DiscourseAi::Completions::Llm.proxy(model)
604604
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
605605

606606
stub_request(:post, url).with(
@@ -657,7 +657,7 @@ def tool_response
657657
658658
TEXT
659659

660-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
660+
llm = DiscourseAi::Completions::Llm.proxy(model)
661661
url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
662662

663663
output = []

spec/lib/completions/endpoints/nova_spec.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def encode_message(message)
2828
end
2929

3030
it "should be able to make a simple request" do
31-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
31+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
3232

3333
content = {
3434
"output" => {
@@ -90,7 +90,7 @@ def encode_message(message)
9090

9191
stub_request(:post, stream_url).to_return(status: 200, body: messages.join)
9292

93-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
93+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
9494
responses = []
9595
proxy.generate("Hello!", user: user) { |partial| responses << partial }
9696

@@ -104,7 +104,7 @@ def encode_message(message)
104104
#model.provider_params["disable_native_tools"] = true
105105
#model.save!
106106

107-
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")
107+
proxy = DiscourseAi::Completions::Llm.proxy(nova_model)
108108
prompt =
109109
DiscourseAi::Completions::Prompt.new(
110110
"You are a helpful assistant.",

spec/lib/completions/endpoints/open_ai_spec.rb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ def request_body(prompt, stream: false, tool_call: false)
177177
describe "max tokens for reasoning models" do
178178
it "uses max_completion_tokens for reasoning models" do
179179
model.update!(name: "o3-mini", max_output_tokens: 999)
180-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
180+
llm = DiscourseAi::Completions::Llm.proxy(model)
181181
prompt =
182182
DiscourseAi::Completions::Prompt.new(
183183
"You are a bot",
@@ -216,7 +216,7 @@ def request_body(prompt, stream: false, tool_call: false)
216216

217217
describe "repeat calls" do
218218
it "can properly reset context" do
219-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
219+
llm = DiscourseAi::Completions::Llm.proxy(model)
220220

221221
tools = [
222222
{
@@ -297,7 +297,7 @@ def request_body(prompt, stream: false, tool_call: false)
297297
describe "max tokens remapping" do
298298
it "remaps max_tokens to max_completion_tokens for reasoning models" do
299299
model.update!(name: "o3-mini")
300-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
300+
llm = DiscourseAi::Completions::Llm.proxy(model)
301301

302302
body_parsed = nil
303303
stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
@@ -313,7 +313,7 @@ def request_body(prompt, stream: false, tool_call: false)
313313

314314
describe "forced tool use" do
315315
it "can properly force tool use" do
316-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
316+
llm = DiscourseAi::Completions::Llm.proxy(model)
317317

318318
tools = [
319319
{
@@ -441,7 +441,7 @@ def request_body(prompt, stream: false, tool_call: false)
441441

442442
describe "disabled tool use" do
443443
it "can properly disable tool use with :none" do
444-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
444+
llm = DiscourseAi::Completions::Llm.proxy(model)
445445

446446
tools = [
447447
{
@@ -532,7 +532,7 @@ def request_body(prompt, stream: false, tool_call: false)
532532
describe "image support" do
533533
it "can handle images" do
534534
model = Fabricate(:llm_model, vision_enabled: true)
535-
llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
535+
llm = DiscourseAi::Completions::Llm.proxy(model)
536536
prompt =
537537
DiscourseAi::Completions::Prompt.new(
538538
"You are image bot",

0 commit comments

Comments
 (0)