Commit c72736d
Improve passing max tokens to anthropic (#864)

* Clean up passing `max_tokens` to Anthropic constructor and chat method
* changelog entry

1 parent ffe0de3, commit c72736d

4 files changed, +41 -10 lines changed


CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@
 
 ## [Unreleased]
 - [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/858] Assistant, when using Anthropic, now also accepts image_url in the message.
+- [FEATURE] [https://github.com/patterns-ai-core/langchainrb/pull/859] Clean up passing `max_tokens` to Anthropic constructor and chat method
 
 ## [0.19.0] - 2024-10-23
 - [BREAKING] [https://github.com/patterns-ai-core/langchainrb/pull/840] Rename `chat_completion_model_name` parameter to `chat_model` in Langchain::LLM parameters.

lib/langchain/llm/anthropic.rb

Lines changed: 6 additions & 6 deletions
@@ -15,14 +15,14 @@ class Anthropic < Base
     temperature: 0.0,
     completion_model: "claude-2.1",
     chat_model: "claude-3-5-sonnet-20240620",
-    max_tokens_to_sample: 256
+    max_tokens: 256
   }.freeze
 
   # Initialize an Anthropic LLM instance
   #
   # @param api_key [String] The API key to use
   # @param llm_options [Hash] Options to pass to the Anthropic client
-  # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens_to_sample: }
+  # @param default_options [Hash] Default options to use on every call to LLM, e.g.: { temperature:, completion_model:, chat_model:, max_tokens: }
   # @return [Langchain::LLM::Anthropic] Langchain::LLM::Anthropic instance
   def initialize(api_key:, llm_options: {}, default_options: {})
     depends_on "anthropic"
@@ -32,7 +32,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
     chat_parameters.update(
       model: {default: @defaults[:chat_model]},
       temperature: {default: @defaults[:temperature]},
-      max_tokens: {default: @defaults[:max_tokens_to_sample]},
+      max_tokens: {default: @defaults[:max_tokens]},
       metadata: {},
       system: {}
     )
@@ -55,7 +55,7 @@ def initialize(api_key:, llm_options: {}, default_options: {})
   def complete(
     prompt:,
     model: @defaults[:completion_model],
-    max_tokens_to_sample: @defaults[:max_tokens_to_sample],
+    max_tokens: @defaults[:max_tokens],
     stop_sequences: nil,
     temperature: @defaults[:temperature],
     top_p: nil,
@@ -64,12 +64,12 @@
     stream: nil
   )
     raise ArgumentError.new("model argument is required") if model.empty?
-    raise ArgumentError.new("max_tokens_to_sample argument is required") if max_tokens_to_sample.nil?
+    raise ArgumentError.new("max_tokens argument is required") if max_tokens.nil?
 
     parameters = {
       model: model,
       prompt: prompt,
-      max_tokens_to_sample: max_tokens_to_sample,
+      max_tokens_to_sample: max_tokens,
       temperature: temperature
     }
     parameters[:stop_sequences] = stop_sequences if stop_sequences
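
Taken together, the rename means callers configure `max_tokens` once in `default_options` and can still override it per call. A minimal usage sketch, assuming a valid key in ANTHROPIC_API_KEY (the values here are illustrative, not the library defaults):

require "langchain"

# Constructor default: every subsequent call uses max_tokens 512
# unless the call itself says otherwise.
llm = Langchain::LLM::Anthropic.new(
  api_key: ENV["ANTHROPIC_API_KEY"],
  default_options: {max_tokens: 512}
)

# Sends max_tokens: 512 to the Messages API.
llm.chat(messages: [{role: "user", content: "Hello!"}])

# A per-call argument wins over the constructor default for this request only.
llm.chat(messages: [{role: "user", content: "Hello!"}], max_tokens: 1024)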

spec/langchain/llm/anthropic_spec.rb

Lines changed: 27 additions & 3 deletions
@@ -5,6 +5,30 @@
 RSpec.describe Langchain::LLM::Anthropic do
   let(:subject) { described_class.new(api_key: "123") }
 
+  describe "#initialize" do
+    context "when default_options are passed" do
+      let(:default_options) { {max_tokens: 512} }
+
+      subject { described_class.new(api_key: "123", default_options: default_options) }
+
+      it "sets the defaults options" do
+        expect(subject.defaults[:max_tokens]).to eq(512)
+      end
+
+      it "get passed to consecutive chat() call" do
+        subject
+        expect(subject.client).to receive(:messages).with(parameters: hash_including(default_options)).and_return({})
+        subject.chat(messages: [{role: "user", content: "Hello json!"}])
+      end
+
+      it "can be overridden" do
+        subject
+        expect(subject.client).to receive(:messages).with(parameters: hash_including({max_tokens: 1024})).and_return({})
+        subject.chat(messages: [{role: "user", content: "Hello json!"}], max_tokens: 1024)
+      end
+    end
+  end
+
   describe "#complete" do
     let(:completion) { "How high is the sky?" }
     let(:fixture) { File.read("spec/fixtures/llm/anthropic/complete.json") }
@@ -17,7 +41,7 @@
           model: described_class::DEFAULTS[:completion_model],
           prompt: completion,
           temperature: described_class::DEFAULTS[:temperature],
-          max_tokens_to_sample: described_class::DEFAULTS[:max_tokens_to_sample]
+          max_tokens_to_sample: described_class::DEFAULTS[:max_tokens]
         })
         .and_return(response)
     end
@@ -40,7 +64,7 @@
           model: described_class::DEFAULTS[:completion_model],
           prompt: completion,
           temperature: described_class::DEFAULTS[:temperature],
-          max_tokens_to_sample: described_class::DEFAULTS[:max_tokens_to_sample]
+          max_tokens_to_sample: described_class::DEFAULTS[:max_tokens]
         })
         .and_return(JSON.parse(fixture))
     end
@@ -63,7 +87,7 @@
           model: described_class::DEFAULTS[:chat_model],
           messages: messages,
           temperature: described_class::DEFAULTS[:temperature],
-          max_tokens: described_class::DEFAULTS[:max_tokens_to_sample],
+          max_tokens: described_class::DEFAULTS[:max_tokens],
           stop_sequences: ["beep"]
         })
         .and_return(response)
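
One detail worth noting from these diffs: `chat` sends `max_tokens` through to the Messages API unchanged, while `complete` now accepts a `max_tokens` keyword but still forwards it to the legacy Text Completions API under its `max_tokens_to_sample` field. A sketch of a `complete` call under the new signature (the prompt text is illustrative):

# The caller-facing keyword is now max_tokens...
llm.complete(prompt: "\n\nHuman: How high is the sky?\n\nAssistant:", max_tokens: 300)

# ...while the request body built inside #complete still uses the
# legacy API field name: max_tokens_to_sample: max_tokens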

spec/langchain/llm/openai_spec.rb

Lines changed: 7 additions & 1 deletion
@@ -91,9 +91,15 @@
 
     it "get passed to consecutive chat() call" do
       subject
-      expect(subject.client).to receive(:chat).with(parameters: hash_including({response_format: {type: "json_object"}})).and_return({})
+      expect(subject.client).to receive(:chat).with(parameters: hash_including(default_options)).and_return({})
       subject.chat(messages: [{role: "user", content: "Hello json!"}])
     end
+
+    it "can be overridden" do
+      subject
+      expect(subject.client).to receive(:chat).with(parameters: hash_including({response_format: {type: "text"}})).and_return({})
+      subject.chat(messages: [{role: "user", content: "Hello json!"}], response_format: {type: "text"})
+    end
   end
 end
 
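
The OpenAI spec change applies the same default-then-override contract to `response_format`. The equivalent caller-side usage might look like this sketch (the key and default values are illustrative):

llm = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {response_format: {type: "json_object"}}
)

# Default: every chat() requests JSON output.
llm.chat(messages: [{role: "user", content: "Hello json!"}])

# Override: plain-text output for this request only.
llm.chat(messages: [{role: "user", content: "Hello json!"}], response_format: {type: "text"})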
