|
22 | 22 |
|
23 | 23 | from __future__ import annotations |
24 | 24 |
|
25 | | -from datetime import datetime |
26 | | -from typing import Annotated, Callable, Literal, Optional, Set, Type, Union |
| 25 | +from typing import Optional, Set, Type, Union |
27 | 26 |
|
28 | 27 | from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny, model_validator |
29 | 28 |
|
|
33 | 32 | from metagpt.core.base import BaseEnvironment |
34 | 33 | from metagpt.core.const import MESSAGE_ROUTE_TO_SELF |
35 | 34 | from metagpt.core.context_mixin import ContextMixin |
36 | | -from metagpt.core.exp_pool import exp_cache |
37 | | -from metagpt.core.exp_pool.context_builders import RoleZeroContextBuilder |
38 | | -from metagpt.core.exp_pool.serializers import RoleZeroSerializer |
39 | 35 | from metagpt.core.logs import logger |
40 | 36 | from metagpt.core.memory import Memory |
41 | | -from metagpt.core.memory.role_zero_memory import RoleZeroLongTermMemory |
42 | | -from metagpt.core.prompts.role_zero import ( |
43 | | - CMD_PROMPT, |
44 | | - QUICK_THINK_EXAMPLES, |
45 | | - QUICK_THINK_SYSTEM_PROMPT, |
46 | | - ROLE_INSTRUCTION, |
47 | | - SYSTEM_PROMPT, |
48 | | -) |
49 | 37 | from metagpt.core.provider import HumanProvider |
50 | 38 | from metagpt.core.schema import ( |
51 | 39 | AIMessage, |
|
55 | 43 | Task, |
56 | 44 | TaskResult, |
57 | 45 | ) |
58 | | -from metagpt.core.strategy.experience_retriever import DummyExpRetriever, ExpRetriever |
59 | 46 | from metagpt.core.strategy.planner import BasePlanner |
60 | | -from metagpt.core.tools.tool_recommend import ToolRecommender |
61 | 47 | from metagpt.core.utils.common import any_to_name, any_to_str, role_raise_decorator |
62 | 48 | from metagpt.core.utils.repair_llm_raw_output import extract_state_value_from_output |
63 | 49 |
|
@@ -603,151 +589,3 @@ def action_description(self) -> str: |
603 | 589 | if self.actions: |
604 | 590 | return any_to_name(self.actions[0]) |
605 | 591 | return "" |
606 | | - |
607 | | - |
608 | | -class BaseRoleZero(Role): |
609 | | - """A role who can think and act dynamically""" |
610 | | - |
611 | | - # Basic Info |
612 | | - name: str = "Zero" |
613 | | - profile: str = "RoleZero" |
614 | | - goal: str = "" |
615 | | - system_msg: Optional[list[str]] = None # Use None to conform to the default value at llm.aask |
616 | | - system_prompt: str = SYSTEM_PROMPT # Use None to conform to the default value at llm.aask |
617 | | - cmd_prompt: str = CMD_PROMPT |
618 | | - cmd_prompt_current_state: str = "" |
619 | | - instruction: str = ROLE_INSTRUCTION |
620 | | - task_type_desc: Optional[str] = None |
621 | | - |
622 | | - # React Mode |
623 | | - react_mode: Literal["react"] = "react" |
624 | | - max_react_loop: int = 50 # used for react mode |
625 | | - |
626 | | - # Tools |
627 | | - tools: list[str] = [] # Use special symbol ["<all>"] to indicate use of all registered tools |
628 | | - tool_recommender: Optional[ToolRecommender] = None |
629 | | - tool_execution_map: Annotated[dict[str, Callable], Field(exclude=True)] = {} |
630 | | - special_tool_commands: list[str] = ["Plan.finish_current_task", "end", "Terminal.run_command", "RoleZero.ask_human"] |
631 | | - # List of exclusive tool commands. |
632 | | - # If multiple instances of these commands appear, only the first occurrence will be retained. |
633 | | - exclusive_tool_commands: list[str] = [ |
634 | | - "Editor.edit_file_by_replace", |
635 | | - "Editor.insert_content_at_line", |
636 | | - "Editor.append_file", |
637 | | - "Editor.open_file", |
638 | | - ] |
639 | | - |
640 | | - # Experience |
641 | | - experience_retriever: Annotated[ExpRetriever, Field(exclude=True)] = DummyExpRetriever() |
642 | | - |
643 | | - # Others |
644 | | - observe_all_msg_from_buffer: bool = True |
645 | | - command_rsp: str = "" # the raw string containing the commands |
646 | | - commands: list[dict] = [] # commands to be executed |
647 | | - memory_k: int = 200 # number of memories (messages) to use as historical context |
648 | | - use_fixed_sop: bool = False |
649 | | - respond_language: str = "" # Language for responding humans and publishing messages. |
650 | | - use_summary: bool = True # whether to summarize at the end |
651 | | - |
652 | | - @model_validator(mode="after") |
653 | | - def set_plan_and_tool(self) -> "RoleZero": |
654 | | - return super().__init__() |
655 | | - |
656 | | - @model_validator(mode="after") |
657 | | - def set_tool_execution(self) -> "RoleZero": |
658 | | - return super().__init__() |
659 | | - |
660 | | - @model_validator(mode="after") |
661 | | - def set_longterm_memory(self) -> "RoleZero": |
662 | | - """Set up long-term memory for the role if enabled in the configuration. |
663 | | -
|
664 | | - If `enable_longterm_memory` is True, set up long-term memory. |
665 | | - The role name will be used as the collection name. |
666 | | - """ |
667 | | - |
668 | | - if self.config.role_zero.enable_longterm_memory: |
669 | | - # Use config.role_zero to initialize long-term memory |
670 | | - self.rc.memory = RoleZeroLongTermMemory( |
671 | | - **self.rc.memory.model_dump(), |
672 | | - persist_path=self.config.role_zero.longterm_memory_persist_path, |
673 | | - collection_name=self.name.replace(" ", ""), |
674 | | - memory_k=self.config.role_zero.memory_k, |
675 | | - similarity_top_k=self.config.role_zero.similarity_top_k, |
676 | | - use_llm_ranker=self.config.role_zero.use_llm_ranker, |
677 | | - ) |
678 | | - logger.info(f"Long-term memory set for role '{self.name}'") |
679 | | - |
680 | | - return self |
681 | | - |
682 | | - async def _think(self) -> bool: |
683 | | - return super()._think() |
684 | | - |
685 | | - @exp_cache(context_builder=RoleZeroContextBuilder(), serializer=RoleZeroSerializer()) |
686 | | - async def llm_cached_aask(self, *, req: list[dict], system_msgs: list[str], **kwargs) -> str: |
687 | | - """Use `exp_cache` to automatically manage experiences. |
688 | | -
|
689 | | - The `RoleZeroContextBuilder` attempts to add experiences to `req`. |
690 | | - The `RoleZeroSerializer` extracts essential parts of `req` for the experience pool, trimming lengthy entries to retain only necessary parts. |
691 | | - """ |
692 | | - return await self.llm.aask(req, system_msgs=system_msgs) |
693 | | - |
694 | | - def _get_prefix(self) -> str: |
695 | | - time_info = datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
696 | | - return super()._get_prefix() + f" The current time is {time_info}." |
697 | | - |
698 | | - async def _act(self) -> Message: |
699 | | - return await super()._act() |
700 | | - |
701 | | - async def _react(self) -> Message: |
702 | | - # NOTE: Diff 1: Each time landing here means news is observed, set todo to allow news processing in _think |
703 | | - self._set_state(0) |
704 | | - |
705 | | - # problems solvable by quick thinking doesn't need to a formal think-act cycle |
706 | | - quick_rsp, _ = await self._quick_think() |
707 | | - if quick_rsp: |
708 | | - return quick_rsp |
709 | | - |
710 | | - actions_taken = 0 |
711 | | - rsp = AIMessage(content="No actions taken yet", cause_by=Action) # will be overwritten after Role _act |
712 | | - while actions_taken < self.rc.max_react_loop: |
713 | | - # NOTE: Diff 2: Keep observing within _react, news will go into memory, allowing adapting to new info |
714 | | - await self._observe() |
715 | | - |
716 | | - # think |
717 | | - has_todo = await self._think() |
718 | | - if not has_todo: |
719 | | - break |
720 | | - # act |
721 | | - logger.debug(f"{self._setting}: {self.rc.state=}, will do {self.rc.todo}") |
722 | | - rsp = await self._act() |
723 | | - actions_taken += 1 |
724 | | - |
725 | | - # post-check |
726 | | - if self.rc.max_react_loop >= 10 and actions_taken >= self.rc.max_react_loop: |
727 | | - # If max_react_loop is a small value (e.g. < 10), it is intended to be reached and make the agent stop |
728 | | - logger.warning(f"reached max_react_loop: {actions_taken}") |
729 | | - human_rsp = await self.ask_human( |
730 | | - "I have reached my max action rounds, do you want me to continue? Yes or no" |
731 | | - ) |
732 | | - if "yes" in human_rsp.lower(): |
733 | | - actions_taken = 0 |
734 | | - return rsp # return output from the last action |
735 | | - |
736 | | - def format_quick_system_prompt(self) -> str: |
737 | | - """Format the system prompt for quick thinking.""" |
738 | | - return QUICK_THINK_SYSTEM_PROMPT.format(examples=QUICK_THINK_EXAMPLES, role_info=self._get_prefix()) |
739 | | - |
740 | | - async def _quick_think(self): |
741 | | - pass |
742 | | - |
743 | | - def _is_special_command(self, cmd) -> bool: |
744 | | - return cmd["command_name"] in self.special_tool_commands |
745 | | - |
746 | | - async def ask_human(self, question: str): |
747 | | - raise NotImplementedError |
748 | | - |
749 | | - async def reply_to_human(self, content: str): |
750 | | - raise NotImplementedError |
751 | | - |
752 | | - async def _end(self, **kwarg): |
753 | | - pass |