Skip to content

Commit 43d5584

Browse files
committed
chore: rename the Bedrock argument `model_id` to `model`, matching the other LLMs
1 parent dd1573a commit 43d5584

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

deepsearcher/llm/bedrock.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ class Bedrock(BaseLLM):
1919

2020
def __init__(
2121
self,
22-
model_id: str = "us.deepseek.r1-v1:0",
22+
model: str = "us.deepseek.r1-v1:0",
2323
max_tokens: int = 20000,
2424
region_name: str = "us-west-2",
2525
**kwargs,
@@ -28,7 +28,7 @@ def __init__(
2828
Initialize an AWS Bedrock language model client.
2929
3030
Args:
31-
model_id (str, optional): The model identifier to use. Defaults to "us.deepseek.r1-v1:0".
31+
model (str, optional): The model identifier to use. Defaults to "us.deepseek.r1-v1:0".
3232
max_tokens (int, optional): The maximum number of tokens to generate. Defaults to 2000.
3333
region_name (str, optional): AWS region for the Bedrock service. Defaults to "us-west-2".
3434
**kwargs: Additional keyword arguments to pass to the boto3 client.
@@ -38,7 +38,7 @@ def __init__(
3838
"""
3939
import boto3
4040

41-
self.model_id = model_id
41+
self.model = model
4242
self.max_tokens = max_tokens
4343

4444
# Extract AWS credentials if provided
@@ -78,7 +78,7 @@ def chat(self, messages: List[Dict]) -> ChatResponse:
7878
formatted_messages.append(message)
7979

8080
response = self.client.converse(
81-
modelId=self.model_id,
81+
modelId=self.model,
8282
messages=formatted_messages,
8383
inferenceConfig={
8484
"maxTokens": self.max_tokens,

0 commit comments

Comments
 (0)