
Commit d1fef18

specs
1 parent 2277aa2 commit d1fef18

6 files changed: +256 -4 lines changed

lib/ai_bot/bot.rb

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ class Bot
     BOT_NOT_FOUND = Class.new(StandardError)

     # the future is agentic, allow for more turns
-    MAX_COMPLETIONS = 2
+    MAX_COMPLETIONS = 8

     # limit is arbitrary, but 5 which was used in the past was too low
     MAX_TOOLS = 20
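
The raised limit matters because, as the comment suggests, the bot re-queries the model after each round of tool calls, and MAX_COMPLETIONS bounds how many completion rounds one reply may consume. A rough sketch of that capped loop in Ruby; the lambdas below are hypothetical stand-ins, not the plugin's actual code:

MAX_COMPLETIONS = 8

# Hypothetical stand-ins for the real completion and tool plumbing.
complete = ->(prompt) { prompt.include?("tool:") ? "final answer" : "tool: echo" }
wants_tool = ->(reply) { reply.start_with?("tool:") }
apply_tool_result = ->(prompt, reply) { "#{prompt}\n#{reply} -> echoed" }

prompt = "user question"
reply = nil

MAX_COMPLETIONS.times do
  reply = complete.call(prompt)                  # one completion round
  break unless wants_tool.call(reply)            # done once the model answers directly
  prompt = apply_tool_result.call(prompt, reply) # feed the tool output back in
end

reply # => "final answer", reached well within the 8-round cap

With the previous limit of 2, a reply could chain at most one tool round before being forced to answer; 8 leaves room for longer agentic sequences.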

lib/completions/dialects/dialect.rb

Lines changed: 23 additions & 3 deletions
@@ -66,9 +66,13 @@ def tool_choice
       prompt.tool_choice
     end

-    def no_more_tool_calls_text
+    def self.no_more_tool_calls_text
       # note, Anthropic must never prefill with an ending whitespace
-      "Since you explicitly asked me not to use tools any more I will not call tools any more.\nHere is the best, complete, answer I can come up with given the information I know:"
+      "I WILL NOT USE TOOLS IN THIS REPLY, user expressed they wanted to stop using tool calls.\nHere is the best, complete, answer I can come up with given the information I have."
+    end
+
+    def no_more_tool_calls_text
+      self.class.no_more_tool_calls_text
     end

     def translate
@@ -80,7 +84,23 @@ def translate
         messages.pop
       end

-      trim_messages(messages).map { |msg| send("#{msg[:type]}_msg", msg) }.compact
+      translated = trim_messages(messages).map { |msg| send("#{msg[:type]}_msg", msg) }.compact
+
+      if !native_tool_support?
+        if prompt.tools.present? && prompt.tool_choice.present?
+          if prompt.tool_choice == :none
+            translated << model_msg(role: "assistant", content: no_more_tool_calls_text)
+          else
+            translated << model_msg(
+              role: "assistant",
+              content:
+                "User required I call the tool: #{prompt.tool_choice} I will make sure I use it now:",
+            )
+          end
+        end
+      end
+
+      translated
     end

     def conversation_context
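
The dialect change above is the core behavioral change: when a dialect has no native tool support, translate now appends an assistant prefill message whenever the prompt carries both tools and a tool_choice, either refusing tools (:none) or committing to the named tool. A minimal standalone sketch of that branching; model_msg and append_tool_choice_prefill below are simplified stand-ins, not the plugin's real API:

# Illustrative sketch only: simplified stand-ins for the dialect's helpers.
def model_msg(role:, content:)
  { role: role, content: content }
end

def append_tool_choice_prefill(translated, tool_choice, no_more_tool_calls_text)
  if tool_choice == :none
    translated << model_msg(role: "assistant", content: no_more_tool_calls_text)
  else
    translated << model_msg(
      role: "assistant",
      content: "User required I call the tool: #{tool_choice} I will make sure I use it now:",
    )
  end
end

translated = [{ role: "user", content: "don't use any tools please" }]
append_tool_choice_prefill(translated, :none, "I WILL NOT USE TOOLS IN THIS REPLY, ...")
# translated now ends with an assistant prefill that steers models lacking
# native tool support away from emitting tool calls.

The specs that follow exercise this from both sides: the Anthropic and AWS Bedrock specs check for the appended prefill message, while the Anthropic, Gemini, and OpenAI specs check the provider-native tool_choice / tool_config parameters.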

spec/lib/completions/endpoints/anthropic_spec.rb

Lines changed: 55 additions & 0 deletions
@@ -714,4 +714,59 @@
       expect(parsed_body[:max_tokens]).to eq(500)
     end
   end
+
+  describe "disabled tool use" do
+    it "can properly disable tool use with :none" do
+      prompt =
+        DiscourseAi::Completions::Prompt.new(
+          "You are a bot",
+          messages: [type: :user, id: "user1", content: "don't use any tools please"],
+          tools: [echo_tool],
+          tool_choice: :none,
+        )
+
+      response_body = {
+        id: "msg_01RdJkxCbsEj9VFyFYAkfy2S",
+        type: "message",
+        role: "assistant",
+        model: "claude-3-haiku-20240307",
+        content: [
+          { type: "text", text: "I won't use any tools. Here's a direct response instead." },
+        ],
+        stop_reason: "end_turn",
+        stop_sequence: nil,
+        usage: {
+          input_tokens: 345,
+          output_tokens: 65,
+        },
+      }.to_json
+
+      parsed_body = nil
+      stub_request(:post, url).with(
+        body:
+          proc do |req_body|
+            parsed_body = JSON.parse(req_body, symbolize_names: true)
+            true
+          end,
+      ).to_return(status: 200, body: response_body)
+
+      result = llm.generate(prompt, user: Discourse.system_user)
+
+      # Verify that tool_choice is set to { type: "none" }
+      expect(parsed_body[:tool_choice]).to eq({ type: "none" })
+
+      # Verify that an assistant message with no_more_tool_calls_text was added
+      messages = parsed_body[:messages]
+      expect(messages.length).to eq(2) # user message + added assistant message
+
+      last_message = messages.last
+      expect(last_message[:role]).to eq("assistant")
+
+      expect(last_message[:content]).to eq(
+        DiscourseAi::Completions::Dialects::Dialect.no_more_tool_calls_text,
+      )
+
+      expect(result).to eq("I won't use any tools. Here's a direct response instead.")
+    end
+  end
 end

spec/lib/completions/endpoints/aws_bedrock_spec.rb

Lines changed: 62 additions & 0 deletions
@@ -484,4 +484,66 @@ def encode_message(message)
       expect(request_body["max_tokens"]).to eq(500)
     end
   end
+
+  describe "disabled tool use" do
+    it "handles tool_choice: :none by adding a prefill message instead of using tool_choice param" do
+      proxy = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+      request = nil
+
+      # Create a prompt with tool_choice: :none
+      prompt =
+        DiscourseAi::Completions::Prompt.new(
+          "You are a helpful assistant",
+          messages: [{ type: :user, content: "don't use any tools please" }],
+          tools: [
+            {
+              name: "echo",
+              description: "echo something",
+              parameters: [
+                { name: "text", type: "string", description: "text to echo", required: true },
+              ],
+            },
+          ],
+          tool_choice: :none,
+        )
+
+      # Mock response from Bedrock
+      content = {
+        content: [text: "I won't use any tools. Here's a direct response instead."],
+        usage: {
+          input_tokens: 25,
+          output_tokens: 15,
+        },
+      }.to_json
+
+      stub_request(
+        :post,
+        "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke",
+      )
+        .with do |inner_request|
+          request = inner_request
+          true
+        end
+        .to_return(status: 200, body: content)
+
+      proxy.generate(prompt, user: user)
+
+      # Parse the request body
+      request_body = JSON.parse(request.body)
+
+      # Verify that tool_choice is NOT present (not supported in Bedrock)
+      expect(request_body).not_to have_key("tool_choice")
+
+      # Verify that an assistant message was added with no_more_tool_calls_text
+      messages = request_body["messages"]
+      expect(messages.length).to eq(2) # user message + added assistant message
+
+      last_message = messages.last
+      expect(last_message["role"]).to eq("assistant")
+
+      expect(last_message["content"]).to eq(
+        DiscourseAi::Completions::Dialects::Dialect.no_more_tool_calls_text,
+      )
+    end
+  end
 end

spec/lib/completions/endpoints/gemini_spec.rb

Lines changed: 56 additions & 0 deletions
@@ -377,4 +377,60 @@ def tool_response

     expect(output.join).to eq("Hello World Sam")
   end
+
+  it "can properly disable tool use with :none" do
+    prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool], tool_choice: :none)
+
+    response = gemini_mock.response("I won't use any tools").to_json
+
+    req_body = nil
+
+    llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+    url = "#{model.url}:generateContent?key=123"
+
+    stub_request(:post, url).with(
+      body:
+        proc do |_req_body|
+          req_body = _req_body
+          true
+        end,
+    ).to_return(status: 200, body: response)
+
+    response = llm.generate(prompt, user: user)
+
+    expect(response).to eq("I won't use any tools")
+
+    parsed = JSON.parse(req_body, symbolize_names: true)
+
+    # Verify that function_calling_config mode is set to "NONE"
+    expect(parsed[:tool_config]).to eq({ function_calling_config: { mode: "NONE" } })
+  end
+
+  it "can properly force specific tool use" do
+    prompt = DiscourseAi::Completions::Prompt.new("Hello", tools: [echo_tool], tool_choice: "echo")
+
+    response = gemini_mock.response("World").to_json
+
+    req_body = nil
+
+    llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+    url = "#{model.url}:generateContent?key=123"
+
+    stub_request(:post, url).with(
+      body:
+        proc do |_req_body|
+          req_body = _req_body
+          true
+        end,
+    ).to_return(status: 200, body: response)
+
+    response = llm.generate(prompt, user: user)
+
+    parsed = JSON.parse(req_body, symbolize_names: true)
+
+    # Verify that function_calling_config is correctly set to ANY mode with the specified tool
+    expect(parsed[:tool_config]).to eq(
+      { function_calling_config: { mode: "ANY", allowed_function_names: ["echo"] } },
+    )
+  end
 end

spec/lib/completions/endpoints/open_ai_spec.rb

Lines changed: 59 additions & 0 deletions
@@ -395,6 +395,65 @@ def request_body(prompt, stream: false, tool_call: false)
     end
   end

+  describe "disabled tool use" do
+    it "can properly disable tool use with :none" do
+      llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+
+      tools = [
+        {
+          name: "echo",
+          description: "echo something",
+          parameters: [
+            { name: "text", type: "string", description: "text to echo", required: true },
+          ],
+        },
+      ]
+
+      prompt =
+        DiscourseAi::Completions::Prompt.new(
+          "You are a bot",
+          messages: [type: :user, id: "user1", content: "don't use any tools please"],
+          tools: tools,
+          tool_choice: :none,
+        )
+
+      response = {
+        id: "chatcmpl-9JxkAzzaeO4DSV3omWvok9TKhCjBH",
+        object: "chat.completion",
+        created: 1_714_544_914,
+        model: "gpt-4-turbo-2024-04-09",
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: "I won't use any tools. Here's a direct response instead.",
+            },
+            logprobs: nil,
+            finish_reason: "stop",
+          },
+        ],
+        usage: {
+          prompt_tokens: 55,
+          completion_tokens: 13,
+          total_tokens: 68,
+        },
+        system_fingerprint: "fp_ea6eb70039",
+      }.to_json
+
+      body_json = nil
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
+        body: proc { |body| body_json = JSON.parse(body, symbolize_names: true) },
+      ).to_return(body: response)
+
+      result = llm.generate(prompt, user: user)
+
+      # Verify that tool_choice is set to "none" in the request
+      expect(body_json[:tool_choice]).to eq("none")
+      expect(result).to eq("I won't use any tools. Here's a direct response instead.")
+    end
+  end
+
   describe "parameter disabling" do
     it "excludes disabled parameters from the request" do
       model.update!(provider_params: { disable_top_p: true, disable_temperature: true })
