From bbe1903e3d38344d30f9c7464e3f437f43cad0b9 Mon Sep 17 00:00:00 2001 From: jingxu8885 Date: Tue, 13 May 2025 11:32:02 +0800 Subject: [PATCH 1/5] fix: Update _get_next_step method in orchestrator.py - Fix message parameter name typo - Ensure proper request_params handling in next step generation - Maintain consistency with other method signatures --- src/mcp_agent/workflows/orchestrator/orchestrator.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/mcp_agent/workflows/orchestrator/orchestrator.py b/src/mcp_agent/workflows/orchestrator/orchestrator.py index 6ce71a037..86d14308e 100644 --- a/src/mcp_agent/workflows/orchestrator/orchestrator.py +++ b/src/mcp_agent/workflows/orchestrator/orchestrator.py @@ -178,7 +178,7 @@ async def execute( if self.plan_type == "iterative": # Get next plan/step next_step = await self._get_next_step( - objective=objective, plan_result=plan_result, model=params.model + objective=objective, plan_result=plan_result, request_params=params ) logger.debug(f"Iteration {iterations}: Iterative plan:", data=next_step) plan = Plan(steps=[next_step], is_complete=next_step.is_complete) @@ -337,7 +337,10 @@ async def _get_full_plan( return plan async def _get_next_step( - self, objective: str, plan_result: PlanResult, model: str = None + self, + objective: str, + plan_result: PlanResult, + request_params: RequestParams | None = None, ) -> NextStep: """Generate just the next needed step""" @@ -357,6 +360,7 @@ async def _get_next_step( next_step = await self.planner.generate_structured( message=prompt, response_model=NextStep, + request_params=request_params, ) return next_step From ec1e42f9e9da42a6d0c7a07a55fd1fd5633b5235 Mon Sep 17 00:00:00 2001 From: jingxu8885 Date: Sat, 17 May 2025 15:22:51 +0800 Subject: [PATCH 2/5] Refactor: Improve AsyncEventBus initialization - Add init_queue method to prevent redundant queue initialization. 
- Ensure proper queue initialization before starting the event bus to enhance asynchronous task execution efficiency. Previously, AsyncEventBus was instantiated during logger initialization, which occurs upon import. This led to uncontrollable queue binding. By initializing the queue during the `start` method, reliable queue binding is ensured in a multi-threaded environment. --- src/mcp_agent/logging/transport.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/mcp_agent/logging/transport.py b/src/mcp_agent/logging/transport.py index c9208694e..ad192e69c 100644 --- a/src/mcp_agent/logging/transport.py +++ b/src/mcp_agent/logging/transport.py @@ -271,11 +271,14 @@ class AsyncEventBus: def __init__(self, transport: EventTransport | None = None): self.transport: EventTransport = transport or NoOpTransport() self.listeners: Dict[str, EventListener] = {} - self._queue = asyncio.Queue() self._task: asyncio.Task | None = None self._running = False - self._stop_event = asyncio.Event() + def init_queue(self): + if self._running: + return + self._queue = asyncio.Queue() + self._stop_event = asyncio.Event() # Store the loop we're created on try: self._loop = asyncio.get_running_loop() @@ -312,7 +315,7 @@ async def start(self): """Start the event bus and all lifecycle-aware listeners.""" if self._running: return - + self.init_queue() # Start each lifecycle-aware listener for listener in self.listeners.values(): if isinstance(listener, LifecycleAwareListener): From 295fe3cb2c838b0244414a4dce5b6612e10f1f1c Mon Sep 17 00:00:00 2001 From: jingxu8885 Date: Tue, 20 May 2025 19:33:35 +0800 Subject: [PATCH 3/5] feat: Implement memory sharing for EvaluatorOptimizerLLM - Add a shared memory parameter to the EvaluatorOptimizerLLM class. - Implement the `share_memory_from` method to enable memory sharing functionality. This change aims to optimize memory management between the evaluator and optimizer. 
For example, the `optimizer_llm` is often bound to an MCP server. After invoking the MCP and receiving content, the LLM might sometimes "hallucinate" or produce responses inconsistent with the MCP's returned content. The `evaluator_llm`, when performing its assessment, needs access to the `optimizer_llm`'s memory to better determine if it is hallucinating or generating unexpected output. Meanwhile, optimizer_llm can do better work when it has access to evaluator_llm's memory. --- .../workflows/evaluator_optimizer/evaluator_optimizer.py | 5 ++++- src/mcp_agent/workflows/llm/augmented_llm.py | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py b/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py index 139dedc7b..37c0d2016 100644 --- a/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +++ b/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py @@ -72,6 +72,7 @@ def __init__( max_refinements: int = 3, llm_factory: Callable[[Agent], AugmentedLLM] | None = None, context: Optional["Context"] = None, + share_memory: bool = False, ): """ Initialize the evaluator-optimizer workflow. 
@@ -86,6 +87,7 @@ def __init__( min_rating: Minimum acceptable quality rating max_refinements: Maximum refinement iterations llm_factory: Optional factory to create LLMs from agents + share_memory: Whether to share the memory between the optimizer and evaluator """ super().__init__(context=context) @@ -140,7 +142,8 @@ def __init__( ) else: raise ValueError(f"Unsupported evaluator type: {type(evaluator)}") - + if share_memory: + self.evaluator_llm.share_memory_from(self.optimizer_llm) self.min_rating = min_rating self.max_refinements = max_refinements diff --git a/src/mcp_agent/workflows/llm/augmented_llm.py b/src/mcp_agent/workflows/llm/augmented_llm.py index c28f2154f..a60cba0a2 100644 --- a/src/mcp_agent/workflows/llm/augmented_llm.py +++ b/src/mcp_agent/workflows/llm/augmented_llm.py @@ -233,6 +233,9 @@ def __init__( self.model_selector = self.context.model_selector self.type_converter = type_converter + def share_memory_from(self, other: "AugmentedLLM"): + self.history = other.history + @abstractmethod async def generate( self, From 98bb9218830555e4db6ef2ed4a673054aae7bc57 Mon Sep 17 00:00:00 2001 From: jingxu8885 Date: Thu, 5 Jun 2025 16:11:46 +0800 Subject: [PATCH 4/5] Add input validation and documentation for the memory sharing method. --- src/mcp_agent/workflows/llm/augmented_llm.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/mcp_agent/workflows/llm/augmented_llm.py b/src/mcp_agent/workflows/llm/augmented_llm.py index a60cba0a2..9f7184048 100644 --- a/src/mcp_agent/workflows/llm/augmented_llm.py +++ b/src/mcp_agent/workflows/llm/augmented_llm.py @@ -234,6 +234,23 @@ def __init__( self.type_converter = type_converter def share_memory_from(self, other: "AugmentedLLM"): + """ + Share memory from another AugmentedLLM instance. + + This creates a reference to the other instance's history, meaning both + instances will share the same memory object and any changes to history + in either instance will be reflected in both. 
+ + Args: + other: The AugmentedLLM instance to share memory from + + Raises: + ValueError: If other is None or not an AugmentedLLM instance + """ + if other is None: + raise ValueError("Cannot share memory from None") + if not isinstance(other, AugmentedLLM): + raise ValueError("Can only share memory from another AugmentedLLM instance") self.history = other.history @abstractmethod From 5cabdf01d7b1146cabd063f4bc74405ba93abce5 Mon Sep 17 00:00:00 2001 From: jingxu8885 Date: Tue, 10 Jun 2025 10:43:19 +0800 Subject: [PATCH 5/5] resolve PR feedback --- .../evaluator_optimizer.py | 2 +- src/mcp_agent/workflows/llm/augmented_llm.py | 20 ------------------- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py b/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py index 828fc6665..4478d4fce 100644 --- a/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +++ b/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py @@ -150,7 +150,7 @@ def __init__( else: raise ValueError(f"Unsupported evaluator type: {type(evaluator)}") if share_memory: - self.evaluator_llm.share_memory_from(self.optimizer_llm) + self.evaluator_llm.history = self.optimizer_llm.history self.min_rating = min_rating self.max_refinements = max_refinements diff --git a/src/mcp_agent/workflows/llm/augmented_llm.py b/src/mcp_agent/workflows/llm/augmented_llm.py index bc940fff5..230feb65d 100644 --- a/src/mcp_agent/workflows/llm/augmented_llm.py +++ b/src/mcp_agent/workflows/llm/augmented_llm.py @@ -274,26 +274,6 @@ def __init__( self.model_selector = self.context.model_selector self.type_converter = type_converter - def share_memory_from(self, other: "AugmentedLLM"): - """ - Share memory from another AugmentedLLM instance. 
- - This creates a reference to the other instance's history, meaning both - instances will share the same memory object and any changes to history - in either instance will be reflected in both. - - Args: - other: The AugmentedLLM instance to share memory from - - Raises: - ValueError: If other is None or not an AugmentedLLM instance - """ - if other is None: - raise ValueError("Cannot share memory from None") - if not isinstance(other, AugmentedLLM): - raise ValueError("Can only share memory from another AugmentedLLM instance") - self.history = other.history - @abstractmethod async def generate( self,