Commit f57954b

Exclude temperature from being automatically added to OpenAI LLM parameters (#971)
* Exclude `temperature` from being automatically added to OpenAI LLM parameters
* Fix linter
Parent: ad30e69 · Commit: f57954b
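For downstream callers, the practical effect is that chat and completion requests no longer carry a `temperature` key unless one is passed explicitly; OpenAI's server-side default applies otherwise. A minimal sketch of the before/after behavior, using the `Langchain::LLM::OpenAI` calls exercised by the specs below:

require "langchain"

llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])

# Before this commit, DEFAULTS silently merged temperature: 0.0 into every request.
# After it, no temperature is sent unless the caller supplies one:
llm.chat(messages: [{role: "user", content: "Hello World"}])

# Callers who relied on the old deterministic default must now opt in explicitly:
llm.chat(messages: [{role: "user", content: "Hello World"}], temperature: 0.0)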

File tree: 3 files changed, +12 −24 lines

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@
 - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/956] Deprecate `Langchain::Vectorsearch::Epsilla` class
 - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/961] Deprecate `Langchain::LLM::LlamaCpp` class
 - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/962] Deprecate `Langchain::LLM::AI21` class
+- [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/971] Exclude `temperature` from being automatically added to OpenAI LLM parameters

 ## [0.19.4] - 2025-02-17
 - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/894] Tools can now output image_urls, and all tool output must be wrapped by a tool_response() method

lib/langchain/llm/openai.rb

Lines changed: 0 additions & 1 deletion
@@ -15,7 +15,6 @@ module Langchain::LLM
   class OpenAI < Base
     DEFAULTS = {
       n: 1,
-      temperature: 0.0,
       chat_model: "gpt-4o-mini",
       embedding_model: "text-embedding-3-small"
     }.freeze
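Removing one key from DEFAULTS changes every request because the defaults hash is merged under caller-supplied parameters, so any key present here is always sent. A hypothetical, self-contained illustration of that merge (not the library's actual assembly code):

# Hypothetical stand-in for the library's parameter assembly, for illustration only.
DEFAULTS = {
  n: 1,
  chat_model: "gpt-4o-mini",
  embedding_model: "text-embedding-3-small"
}.freeze

def build_parameters(overrides = {})
  # Caller-supplied values win; keys absent from both hashes are simply not sent.
  DEFAULTS.merge(overrides)
end

p build_parameters
# => {:n=>1, :chat_model=>"gpt-4o-mini", :embedding_model=>"text-embedding-3-small"}
p build_parameters(temperature: 0.7)
# => same hash plus :temperature=>0.7, present only because the caller asked for it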

spec/langchain/llm/openai_spec.rb

Lines changed: 11 additions & 23 deletions
@@ -316,9 +316,7 @@
       parameters: {
         n: 1,
         model: "gpt-4o-mini",
-        messages: [{content: "Hello World", role: "user"}],
-        temperature: 0.0
-        # max_tokens: 4087
+        messages: [{content: "Hello World", role: "user"}]
       }
     }
   end
@@ -359,9 +357,7 @@
     {
       n: 1,
       model: "text-davinci-003",
-      prompt: "Hello World",
-      temperature: 0.0
-      # max_tokens: 4095
+      prompt: "Hello World"
     }
   }
 end
@@ -375,10 +371,8 @@
       expect(subject.client).to receive(:chat).with({
         parameters: {
           n: 1,
-          # max_tokens: 4087,
           model: "gpt-4o-mini",
-          messages: [{content: "Hello World", role: "user"}],
-          temperature: 0.0
+          messages: [{content: "Hello World", role: "user"}]
         }
       }).and_return(response)
       subject.complete(prompt: "Hello World")
@@ -398,9 +392,7 @@
       parameters: {
         n: 1,
         model: "gpt-3.5-turbo",
-        messages: [{content: "Hello World", role: "user"}],
-        temperature: 0.0 # ,
-        # max_tokens: 4086
+        messages: [{content: "Hello World", role: "user"}]
       }
     }
   end
@@ -409,10 +401,8 @@
       expect(subject.client).to receive(:chat).with({
         parameters: {
           n: 1,
-          # max_tokens: 4087 ,
           model: "gpt-4o-mini",
-          messages: [{content: "Hello World", role: "user"}],
-          temperature: 0.0
+          messages: [{content: "Hello World", role: "user"}]
         }
       }).and_return(response)
       subject.complete(prompt: "Hello World")
@@ -422,19 +412,19 @@

     context "with prompt and parameters" do
       let(:parameters) do
-        {parameters: {n: 1, model: "gpt-3.5-turbo", messages: [{content: "Hello World", role: "user"}], temperature: 1.0}} # , max_tokens: 4087}}
+        {parameters: {n: 1, model: "gpt-3.5-turbo", messages: [{content: "Hello World", role: "user"}]}}
       end

       it "returns a completion" do
-        response = subject.complete(prompt: "Hello World", model: "gpt-3.5-turbo", temperature: 1.0)
+        response = subject.complete(prompt: "Hello World", model: "gpt-3.5-turbo")

         expect(response.completion).to eq("The meaning of life is subjective and can vary from person to person.")
       end
     end

     context "with failed API call" do
       let(:parameters) do
-        {parameters: {n: 1, model: "gpt-4o-mini", messages: [{content: "Hello World", role: "user"}], temperature: 0.0}} # , max_tokens: 4087}}
+        {parameters: {n: 1, model: "gpt-4o-mini", messages: [{content: "Hello World", role: "user"}]}}
       end
       let(:response) do
         {"error" => {"code" => 400, "message" => "User location is not supported for the API use.", "type" => "invalid_request_error"}}
@@ -470,10 +460,9 @@
   describe "#chat" do
     let(:prompt) { "What is the meaning of life?" }
     let(:model) { "gpt-4o-mini" }
-    let(:temperature) { 0.0 }
     let(:n) { 1 }
     let(:history) { [content: prompt, role: "user"] }
-    let(:parameters) { {parameters: {n: n, messages: history, model: model, temperature: temperature}} } # max_tokens: be_between(4014, 4096)}} }
+    let(:parameters) { {parameters: {n: n, messages: history, model: model}} }
     let(:answer) { "As an AI language model, I don't have feelings, but I'm functioning well. How can I assist you today?" }
     let(:answer_2) { "Alternative answer" }
     let(:choices) do
@@ -585,11 +574,10 @@
     end

     context "with options" do
-      let(:temperature) { 0.75 }
       let(:model) { "gpt-3.5-turbo-0301" }

       it "sends prompt as message and additional params and returns a response message" do
-        response = subject.complete(prompt: prompt, model: model, temperature: temperature)
+        response = subject.complete(prompt: prompt, model: model)

         expect(response.chat_completion).to eq(answer)
       end
@@ -612,7 +600,7 @@
       end

       it "returns multiple response messages" do
-        response = subject.chat(messages: [content: prompt, role: "user"], model: model, temperature: temperature, n: 2)
+        response = subject.chat(messages: [content: prompt, role: "user"], model: model, n: 2)

         expect(response.completions).to eq(choices)
       end
