Skip to content
This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit 549cee3

Browse files
committed
support disabling native tools for OpenAI
1 parent 82d9079 commit 549cee3

File tree

4 files changed

+101
-15
lines changed

4 files changed

+101
-15
lines changed

app/models/llm_model.rb

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,10 +26,14 @@ def self.provider_params
2626
},
2727
open_ai: {
2828
organization: :text,
29+
disable_native_tools: :checkbox,
2930
},
3031
google: {
3132
disable_native_tools: :checkbox,
3233
},
34+
azure: {
35+
disable_native_tools: :checkbox,
36+
},
3337
hugging_face: {
3438
disable_system_prompt: :checkbox,
3539
},

lib/completions/dialects/chat_gpt.rb

Lines changed: 27 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -44,17 +44,31 @@ def is_gpt_o?
4444
llm_model.provider == "open_ai" && llm_model.name.include?("o1-")
4545
end
4646

47+
def disable_native_tools?
48+
return @disable_native_tools if defined?(@disable_native_tools)
49+
!!@disable_native_tools = llm_model.lookup_custom_param("disable_native_tools")
50+
end
51+
4752
private
4853

4954
def tools_dialect
50-
@tools_dialect ||= DiscourseAi::Completions::Dialects::OpenAiTools.new(prompt.tools)
55+
if disable_native_tools?
56+
super
57+
else
58+
@tools_dialect ||= DiscourseAi::Completions::Dialects::OpenAiTools.new(prompt.tools)
59+
end
5160
end
5261

5362
def system_msg(msg)
63+
content = msg[:content]
64+
if disable_native_tools? && tools_dialect.instructions.present?
65+
content = content + "\n\n" + tools_dialect.instructions
66+
end
67+
5468
if is_gpt_o?
55-
{ role: "user", content: msg[:content] }
69+
{ role: "user", content: content }
5670
else
57-
{ role: "system", content: msg[:content] }
71+
{ role: "system", content: content }
5872
end
5973
end
6074

@@ -63,11 +77,19 @@ def model_msg(msg)
6377
end
6478

6579
def tool_call_msg(msg)
66-
tools_dialect.from_raw_tool_call(msg)
80+
if disable_native_tools?
81+
super
82+
else
83+
tools_dialect.from_raw_tool_call(msg)
84+
end
6785
end
6886

6987
def tool_msg(msg)
70-
tools_dialect.from_raw_tool(msg)
88+
if disable_native_tools?
89+
super
90+
else
91+
tools_dialect.from_raw_tool(msg)
92+
end
7193
end
7294

7395
def user_msg(msg)

lib/completions/endpoints/open_ai.rb

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ def perform_completion!(
3636
partial_tool_calls: false,
3737
&blk
3838
)
39+
@disable_native_tools = dialect.disable_native_tools?
3940
if dialect.respond_to?(:is_gpt_o?) && dialect.is_gpt_o? && block_given?
4041
# we need to disable streaming and simulate it
4142
blk.call "", lambda { |*| }
@@ -69,10 +70,17 @@ def prepare_payload(prompt, model_params, dialect)
6970
# We'll fallback to guess this using the tokenizer.
7071
payload[:stream_options] = { include_usage: true } if llm_model.provider == "open_ai"
7172
end
72-
if dialect.tools.present?
73-
payload[:tools] = dialect.tools
74-
if dialect.tool_choice.present?
75-
payload[:tool_choice] = { type: "function", function: { name: dialect.tool_choice } }
73+
if !xml_tools_enabled?
74+
if dialect.tools.present?
75+
payload[:tools] = dialect.tools
76+
if dialect.tool_choice.present?
77+
payload[:tool_choice] = {
78+
type: "function",
79+
function: {
80+
name: dialect.tool_choice,
81+
},
82+
}
83+
end
7684
end
7785
end
7886
payload
@@ -121,7 +129,7 @@ def decode_chunk_finish
121129
end
122130

123131
def xml_tools_enabled?
124-
false
132+
!!@disable_native_tools
125133
end
126134

127135
private

spec/lib/completions/endpoints/open_ai_spec.rb

Lines changed: 57 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,10 @@ def stream_line(delta, finish_reason: nil, tool_call: false)
5353
}.to_json
5454
end
5555

56-
def stub_raw(chunks)
57-
WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
58-
status: 200,
59-
body: chunks,
60-
)
56+
def stub_raw(chunks, body_blk: nil)
57+
stub = WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions")
58+
stub.with(body: body_blk) if body_blk
59+
stub.to_return(status: 200, body: chunks)
6160
end
6261

6362
def stub_streamed_response(prompt, deltas, tool_call: false)
@@ -391,6 +390,59 @@ def request_body(prompt, stream: false, tool_call: false)
391390
end
392391

393392
describe "#perform_completion!" do
393+
context "when using XML tool calls format" do
394+
let(:xml_tool_call_response) { <<~XML }
395+
<function_calls>
396+
<invoke>
397+
<tool_name>get_weather</tool_name>
398+
<parameters>
399+
<location>Sydney</location>
400+
<unit>c</unit>
401+
</parameters>
402+
</invoke>
403+
</function_calls>
404+
XML
405+
406+
it "parses XML tool calls" do
407+
response = {
408+
id: "chatcmpl-6sZfAb30Rnv9Q7ufzFwvQsMpjZh8S",
409+
object: "chat.completion",
410+
created: 1_678_464_820,
411+
model: "gpt-3.5-turbo-0301",
412+
usage: {
413+
prompt_tokens: 8,
414+
completion_tokens: 13,
415+
total_tokens: 499,
416+
},
417+
choices: [
418+
{
419+
message: {
420+
role: "assistant",
421+
content: xml_tool_call_response,
422+
},
423+
finish_reason: "stop",
424+
index: 0,
425+
},
426+
],
427+
}.to_json
428+
429+
endpoint.llm_model.update!(provider_params: { disable_native_tools: true })
430+
body = nil
431+
open_ai_mock.stub_raw(response, body_blk: proc { |inner_body| body = inner_body })
432+
433+
dialect = compliance.dialect(prompt: compliance.generic_prompt(tools: tools))
434+
tool_call = endpoint.perform_completion!(dialect, user)
435+
436+
body_parsed = JSON.parse(body, symbolize_names: true)
437+
expect(body_parsed[:tools]).to eq(nil)
438+
439+
expect(body_parsed[:messages][0][:content]).to include("<function_calls>")
440+
441+
expect(tool_call.name).to eq("get_weather")
442+
expect(tool_call.parameters).to eq({ location: "Sydney", unit: "c" })
443+
end
444+
end
445+
394446
context "when using regular mode" do
395447
context "with simple prompts" do
396448
it "completes a trivial prompt and logs the response" do

0 commit comments

Comments (0)