
Commit 8c84de7

update bedrock

1 parent: 07d4be2

File tree: 6 files changed (+75, -36 lines)

metagpt/configs/llm_config.py

Lines changed: 4 additions & 0 deletions
@@ -100,6 +100,10 @@ class LLMConfig(YamlModel):
     # For Messages Control
     use_system_prompt: bool = True
 
+    # reasoning / thinking switch
+    reasoning: bool = False
+    reasoning_tokens: int = 4000  # reasoning budget tokens to generate, usually smaller than max_tokens
+
     @field_validator("api_key")
     @classmethod
     def check_llm_key(cls, v):
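
For reference, the new switch rides on MetaGPT's existing LLMConfig; a minimal sketch of turning it on from Python. Only reasoning and reasoning_tokens come from this commit; the other fields are assumed from the surrounding codebase, and the credential values are placeholders.

    from metagpt.configs.llm_config import LLMConfig

    # Minimal sketch; only `reasoning` and `reasoning_tokens` are introduced by this commit.
    config = LLMConfig(
        api_type="bedrock",  # assumed existing LLMType value
        model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        access_key="...",         # placeholder
        secret_key="...",         # placeholder
        region_name="us-east-1",  # placeholder
        reasoning=True,           # enable extended thinking
        reasoning_tokens=4000,    # thinking budget, kept below the max-token limit
    )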

metagpt/provider/base_llm.py

Lines changed: 10 additions & 0 deletions
@@ -43,6 +43,16 @@ class BaseLLM(ABC):
     model: Optional[str] = None  # deprecated
     pricing_plan: Optional[str] = None
 
+    _reasoning_content: Optional[str] = None  # content from reasoning mode
+
+    @property
+    def reasoning_content(self):
+        return self._reasoning_content
+
+    @reasoning_content.setter
+    def reasoning_content(self, value: str):
+        self._reasoning_content = value
+
     @abstractmethod
     def __init__(self, config: LLMConfig):
         pass
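
This property pair gives every provider a uniform slot for chain-of-thought text: providers write through the setter, callers read it back after a completion. A hedged usage sketch, with llm standing for any already-constructed BaseLLM subclass and aask being BaseLLM's existing high-level entry point:

    # Hedged sketch: read the stashed thinking text after a call.
    async def demo(llm):
        answer = await llm.aask("Why is the sky blue?")
        print(llm.reasoning_content)  # set by the provider during the call; None if reasoning was off
        print(answer)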

metagpt/provider/bedrock/base_provider.py

Lines changed: 8 additions & 3 deletions
@@ -1,11 +1,16 @@
 import json
 from abc import ABC, abstractmethod
+from typing import Union
 
 
 class BaseBedrockProvider(ABC):
     # to handle different generation kwargs
     max_tokens_field_name = "max_tokens"
 
+    def __init__(self, reasoning: bool = False, reasoning_tokens: int = 4000):
+        self.reasoning = reasoning
+        self.reasoning_tokens = reasoning_tokens
+
     @abstractmethod
     def _get_completion_from_dict(self, rsp_dict: dict) -> str:
         ...

@@ -14,14 +19,14 @@ def get_request_body(self, messages: list[dict], const_kwargs, *args, **kwargs)
         body = json.dumps({"prompt": self.messages_to_prompt(messages), **const_kwargs})
         return body
 
-    def get_choice_text(self, response_body: dict) -> str:
+    def get_choice_text(self, response_body: dict) -> Union[str, dict[str, str]]:
         completions = self._get_completion_from_dict(response_body)
         return completions
 
-    def get_choice_text_from_stream(self, event) -> str:
+    def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         rsp_dict = json.loads(event["chunk"]["bytes"])
         completions = self._get_completion_from_dict(rsp_dict)
-        return completions
+        return False, completions
 
     def messages_to_prompt(self, messages: list[dict]) -> str:
         """[{"role": "user", "content": msg}] to user: <msg> etc."""

metagpt/provider/bedrock/bedrock_provider.py

Lines changed: 34 additions & 15 deletions
@@ -1,5 +1,5 @@
 import json
-from typing import Literal, Tuple
+from typing import Literal, Tuple, Union
 
 from metagpt.provider.bedrock.base_provider import BaseBedrockProvider
 from metagpt.provider.bedrock.utils import (

@@ -32,6 +32,10 @@ def _split_system_user_messages(self, messages: list[dict]) -> Tuple[str, list[dict]]:
         return self.messages_to_prompt(system_messages), user_messages
 
     def get_request_body(self, messages: list[dict], generate_kwargs, *args, **kwargs) -> str:
+        if self.reasoning:
+            generate_kwargs["temperature"] = 1  # should be 1
+            generate_kwargs["thinking"] = {"type": "enabled", "budget_tokens": self.reasoning_tokens}
+
         system_message, user_messages = self._split_system_user_messages(messages)
         body = json.dumps(
             {

@@ -43,17 +47,26 @@ def get_request_body(self, messages: list[dict], generate_kwargs, *args, **kwargs) -> str:
         )
         return body
 
-    def _get_completion_from_dict(self, rsp_dict: dict) -> str:
+    def _get_completion_from_dict(self, rsp_dict: dict) -> dict[str, Tuple[str, str]]:
+        if self.reasoning:
+            return {"reasoning_content": rsp_dict["content"][0]["thinking"], "content": rsp_dict["content"][1]["text"]}
         return rsp_dict["content"][0]["text"]
 
-    def get_choice_text_from_stream(self, event) -> str:
+    def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         # https://docs.anthropic.com/claude/reference/messages-streaming
         rsp_dict = json.loads(event["chunk"]["bytes"])
         if rsp_dict["type"] == "content_block_delta":
-            completions = rsp_dict["delta"]["text"]
-            return completions
+            reasoning = False
+            if rsp_dict["delta"]["type"] == "text_delta":
+                completions = rsp_dict["delta"]["text"]
+            elif rsp_dict["delta"]["type"] == "thinking_delta":
+                completions = rsp_dict["delta"]["thinking"]
+                reasoning = True
+            elif rsp_dict["delta"]["type"] == "signature_delta":
+                completions = ""
+            return reasoning, completions
         else:
-            return ""
+            return False, ""
 
 
 class CohereProvider(BaseBedrockProvider):

@@ -87,10 +100,10 @@ def get_request_body(self, messages: list[dict], generate_kwargs, *args, **kwargs) -> str:
         body = json.dumps({"prompt": prompt, "stream": kwargs.get("stream", False), **generate_kwargs})
         return body
 
-    def get_choice_text_from_stream(self, event) -> str:
+    def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         rsp_dict = json.loads(event["chunk"]["bytes"])
         completions = rsp_dict.get("text", "")
-        return completions
+        return False, completions
 
 
 class MetaProvider(BaseBedrockProvider):

@@ -133,10 +146,10 @@ def get_request_body(self, messages: list[dict], generate_kwargs, *args, **kwargs) -> str:
         )
         return body
 
-    def get_choice_text_from_stream(self, event) -> str:
+    def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         rsp_dict = json.loads(event["chunk"]["bytes"])
         completions = rsp_dict.get("choices", [{}])[0].get("delta", {}).get("content", "")
-        return completions
+        return False, completions
 
     def _get_completion_from_dict(self, rsp_dict: dict) -> str:
         if self.model_type == "j2":

@@ -159,10 +172,10 @@ def get_request_body(self, messages: list[dict], generate_kwargs, *args, **kwargs) -> str:
     def _get_completion_from_dict(self, rsp_dict: dict) -> str:
         return rsp_dict["results"][0]["outputText"]
 
-    def get_choice_text_from_stream(self, event) -> str:
+    def get_choice_text_from_stream(self, event) -> Union[bool, str]:
         rsp_dict = json.loads(event["chunk"]["bytes"])
         completions = rsp_dict["outputText"]
-        return completions
+        return False, completions
 
 
 PROVIDERS = {

@@ -175,8 +188,14 @@ def get_choice_text_from_stream(self, event) -> str:
 }
 
 
-def get_provider(model_id: str):
-    provider, model_name = model_id.split(".")[0:2]  # meta、mistral……
+def get_provider(model_id: str, reasoning: bool = False, reasoning_tokens: int = 4000):
+    arr = model_id.split(".")
+    if len(arr) == 2:
+        provider, model_name = arr  # meta、mistral……
+    elif len(arr) == 3:
+        # some model_ids may contain country like us.xx.xxx
+        _, provider, model_name = arr
+
     if provider not in PROVIDERS:
         raise KeyError(f"{provider} is not supported!")
     if provider == "meta":

@@ -188,4 +207,4 @@ def get_provider(model_id: str):
     elif provider == "cohere":
         # distinguish between R/R+ and older models
         return PROVIDERS[provider](model_name)
-    return PROVIDERS[provider]()
+    return PROVIDERS[provider](reasoning=reasoning, reasoning_tokens=reasoning_tokens)
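
Putting the Anthropic changes together: for a region-prefixed model id, get_provider now resolves the middle segment as the vendor and forwards the reasoning switches, and get_request_body injects the thinking block. A hedged sketch (the const kwargs dict passed in is illustrative):

    import json

    from metagpt.provider.bedrock.bedrock_provider import get_provider

    # "us.anthropic.claude-..." splits into ("us", "anthropic", "claude-..."), the 3-part case above.
    provider = get_provider(
        "us.anthropic.claude-3-7-sonnet-20250219-v1:0", reasoning=True, reasoning_tokens=4000
    )
    body = provider.get_request_body(
        [{"role": "user", "content": "hi"}],
        {"max_tokens": 8192, "anthropic_version": "bedrock-2023-05-31"},  # illustrative kwargs
    )
    print(json.loads(body)["thinking"])  # {'type': 'enabled', 'budget_tokens': 4000}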

metagpt/provider/bedrock/utils.py

Lines changed: 3 additions & 14 deletions
@@ -48,6 +48,9 @@
     "anthropic.claude-3-opus-20240229-v1:0": 4096,
     # Claude 3.5 Sonnet
     "anthropic.claude-3-5-sonnet-20240620-v1:0": 8192,
+    # Claude 3.7 Sonnet
+    "us.anthropic.claude-3-7-sonnet-20250219-v1:0": 131072,
+    "anthropic.claude-3-7-sonnet-20250219-v1:0": 131072,
     # Command Text
     # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command.html
     "cohere.command-text-v14": 4096,

@@ -135,20 +138,6 @@ def messages_to_prompt_llama3(messages: list[dict]) -> str:
     return prompt
 
 
-def messages_to_prompt_claude2(messages: list[dict]) -> str:
-    GENERAL_TEMPLATE = "\n\n{role}: {content}"
-    prompt = ""
-    for message in messages:
-        role = message.get("role", "")
-        content = message.get("content", "")
-        prompt += GENERAL_TEMPLATE.format(role=role, content=content)
-
-    if role != "assistant":
-        prompt += "\n\nAssistant:"
-
-    return prompt
-
-
 def get_max_tokens(model_id: str) -> int:
     try:
         max_tokens = (NOT_SUPPORT_STREAM_MODELS | SUPPORT_STREAM_MODELS)[model_id]
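
With the new table entries, the existing lookup resolves the Claude 3.7 Sonnet token ceiling for both the plain and the region-prefixed id:

    from metagpt.provider.bedrock.utils import get_max_tokens

    print(get_max_tokens("anthropic.claude-3-7-sonnet-20250219-v1:0"))     # 131072
    print(get_max_tokens("us.anthropic.claude-3-7-sonnet-20250219-v1:0"))  # 131072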

metagpt/provider/bedrock_api.py

Lines changed: 16 additions & 4 deletions
@@ -23,7 +23,9 @@ class BedrockLLM(BaseLLM):
     def __init__(self, config: LLMConfig):
         self.config = config
         self.__client = self.__init_client("bedrock-runtime")
-        self.__provider = get_provider(self.config.model)
+        self.__provider = get_provider(
+            self.config.model, reasoning=self.config.reasoning, reasoning_tokens=self.config.reasoning_tokens
+        )
         self.cost_manager = CostManager(token_costs=BEDROCK_TOKEN_COSTS)
         if self.config.model in NOT_SUPPORT_STREAM_MODELS:
             logger.warning(f"model {self.config.model} doesn't support streaming output!")

@@ -102,7 +104,11 @@ def _const_kwargs(self) -> dict:
     # However, aioboto3 doesn't support invoke model
 
     def get_choice_text(self, rsp: dict) -> str:
-        return self.__provider.get_choice_text(rsp)
+        rsp = self.__provider.get_choice_text(rsp)
+        if isinstance(rsp, dict):
+            self.reasoning_content = rsp.get("reasoning_content")
+            rsp = rsp.get("content")
+        return rsp
 
     async def acompletion(self, messages: list[dict]) -> dict:
         request_body = self.__provider.get_request_body(messages, self._const_kwargs)

@@ -133,10 +139,16 @@ def _get_response_body(self, response) -> dict:
     async def _get_stream_response_body(self, stream_response) -> List[str]:
         def collect_content() -> str:
             collected_content = []
+            collected_reasoning_content = []
             for event in stream_response["body"]:
-                chunk_text = self.__provider.get_choice_text_from_stream(event)
-                collected_content.append(chunk_text)
+                reasoning, chunk_text = self.__provider.get_choice_text_from_stream(event)
+                if reasoning:
+                    collected_reasoning_content.append(chunk_text)
+                else:
+                    collected_content.append(chunk_text)
                 log_llm_stream(chunk_text)
+            if collected_reasoning_content:
+                self.reasoning_content = "".join(collected_reasoning_content)
             return collected_content
 
         loop = asyncio.get_running_loop()
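
End to end, the pieces above combine into the flow below: configure reasoning, let the stream collector split thinking deltas from answer tokens, then read both. A hedged sketch, assuming valid AWS credentials; the credential values are placeholders and the non-diff config fields are assumptions:

    import asyncio

    from metagpt.configs.llm_config import LLMConfig
    from metagpt.provider.bedrock_api import BedrockLLM

    async def main():
        config = LLMConfig(
            api_type="bedrock",  # assumed existing LLMType value
            model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
            access_key="...",    # placeholder
            secret_key="...",    # placeholder
            reasoning=True,
            reasoning_tokens=4000,
        )
        llm = BedrockLLM(config)
        answer = await llm.aask("How many primes are below 50?")
        print(llm.reasoning_content)  # thinking deltas collected in _get_stream_response_body
        print(answer)

    asyncio.run(main())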
