
Commit 119385c

reasoning_max_token set to 4000 (suggested)

1 parent: ff47706

5 files changed: +5 -4 lines

metagpt/configs/llm_config.py (1 addition, 1 deletion)

@@ -103,7 +103,7 @@ class LLMConfig(YamlModel):
 
     # reasoning / thinking switch
     reasoning: bool = False
-    reasoning_max_token: int = 1024  # reasoning budget tokens to generate, usually smaller than max_token
+    reasoning_max_token: int = 4000  # reasoning budget tokens to generate, usually smaller than max_token
 
     @field_validator("api_key")
     @classmethod
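With the new default, any config that flips the reasoning switch without setting a budget now gets 4000 thinking tokens instead of 1024. A minimal sketch of the field's behavior (constructing LLMConfig directly for illustration; real MetaGPT configs are typically loaded from YAML, and the placeholder api_key value here is hypothetical):

from metagpt.configs.llm_config import LLMConfig

# Illustration only: build the pydantic model directly instead of loading YAML.
cfg = LLMConfig(api_key="sk-example", reasoning=True)
assert cfg.reasoning is True
assert cfg.reasoning_max_token == 4000  # default raised from 1024 by this commit
# As the field comment notes, the budget should stay below max_token.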

metagpt/provider/anthropic_api.py (1 addition, 1 deletion)

@@ -42,7 +42,7 @@ def _update_costs(self, usage: Usage, model: str = None, local_calc_usage: bool
         super()._update_costs(usage, model)
 
     def get_choice_text(self, resp: Message) -> str:
-        if len(resp.content) > 0:
+        if len(resp.content) > 1:
             self.reasoning_content = resp.content[0].thinking
             text = resp.content[1].text
         else:
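This guard change is what makes the branch safe: with extended thinking enabled, an Anthropic Messages response carries a thinking block at content[0] and the answer text at content[1], while a plain response has only a single text block, so the old > 0 check would reach resp.content[1] and raise IndexError. A sketch of both response shapes using simplified stand-in objects (not the SDK's real block classes):

from types import SimpleNamespace

# Simplified stand-ins for the SDK's ThinkingBlock / TextBlock.
thinking_block = SimpleNamespace(thinking="chain of thought ...")
text_block = SimpleNamespace(text="final answer")

with_thinking = SimpleNamespace(content=[thinking_block, text_block])
plain = SimpleNamespace(content=[text_block])

def get_choice_text(resp) -> str:
    # Mirrors the patched logic: only unpack a thinking block when one exists.
    if len(resp.content) > 1:
        return resp.content[1].text
    return resp.content[0].text

assert get_choice_text(with_thinking) == "final answer"
assert get_choice_text(plain) == "final answer"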

metagpt/provider/bedrock/base_provider.py (1 addition, 1 deletion)

@@ -7,7 +7,7 @@ class BaseBedrockProvider(ABC):
     # to handle different generation kwargs
     max_tokens_field_name = "max_tokens"
 
-    def __init__(self, reasoning: bool = False, reasoning_max_token: int = 1024):
+    def __init__(self, reasoning: bool = False, reasoning_max_token: int = 4000):
        self.reasoning = reasoning
        self.reasoning_max_token = reasoning_max_token
 
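Concrete Bedrock providers inherit these two attributes and can splice the budget into their generation kwargs. A hypothetical sketch (the build_body method and its parameters are illustrative, not MetaGPT's actual hook; the thinking payload follows Anthropic's extended-thinking request shape):

from abc import ABC

class BaseBedrockProvider(ABC):
    max_tokens_field_name = "max_tokens"

    def __init__(self, reasoning: bool = False, reasoning_max_token: int = 4000):
        self.reasoning = reasoning
        self.reasoning_max_token = reasoning_max_token

class AnthropicStyleProvider(BaseBedrockProvider):
    def build_body(self, prompt: str, max_tokens: int = 8192) -> dict:
        # Hypothetical helper: assemble the request body for one call.
        body = {
            self.max_tokens_field_name: max_tokens,
            "messages": [{"role": "user", "content": prompt}],
        }
        if self.reasoning:
            # Anthropic's extended-thinking shape; budget must stay below max_tokens.
            body["thinking"] = {"type": "enabled", "budget_tokens": self.reasoning_max_token}
        return body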

metagpt/provider/bedrock/bedrock_provider.py (1 addition, 1 deletion)

@@ -191,7 +191,7 @@ def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         }
 
 
-def get_provider(model_id: str, reasoning: bool = False, reasoning_max_token: int = 1024):
+def get_provider(model_id: str, reasoning: bool = False, reasoning_max_token: int = 4000):
     arr = model_id.split(".")
     if len(arr) == 2:
         provider, model_name = arr  # meta、mistral……
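The dot-split is how get_provider routes Bedrock model IDs: two segments map straight to a provider, while cross-region inference profiles like the one priced in this commit carry an extra leading region segment. For illustration:

# Two segments: provider and model name (meta, mistral, ...).
print("meta.llama3-70b-instruct-v1:0".split("."))
# ['meta', 'llama3-70b-instruct-v1:0']

# Three segments: a region prefix such as "us." comes first.
print("us.anthropic.claude-3-7-sonnet-20250219-v1:0".split("."))
# ['us', 'anthropic', 'claude-3-7-sonnet-20250219-v1:0']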

metagpt/utils/token_counter.py (1 addition)

@@ -90,6 +90,7 @@
     "anthropic/claude-3.7-sonnet": {"prompt": 0.003, "completion": 0.015},
     "anthropic/claude-3.7-sonnet:beta": {"prompt": 0.003, "completion": 0.015},
     "anthropic/claude-3.7-sonnet:thinking": {"prompt": 0.003, "completion": 0.015},
+    "anthropic.claude-3-7-sonnet-20250219-v1:0": {"prompt": 0.003, "completion": 0.015},
     "us.anthropic.claude-3-7-sonnet-20250219-v1:0": {"prompt": 0.003, "completion": 0.015},
     "google/gemini-pro-1.5": {"prompt": 0.0025, "completion": 0.0075},  # for openrouter, end
     "deepseek-chat": {"prompt": 0.00027, "completion": 0.0011},
