Commit 3b03a91

fix(agents): fix model configuration not taking effect in sub-agents

1. Remove AnthropicPromptCachingMiddleware and simplify the SummarizationMiddleware configuration
2. Add debug logging in context_based_model
3. Update the research-agent description to clarify its function
1 parent a9d2f41 commit 3b03a91

2 files changed: +8 −20 lines changed

src/agents/common/middlewares/context_middlewares.py

Lines changed: 2 additions & 0 deletions
@@ -4,6 +4,7 @@
 
 from langchain.agents.middleware import ModelRequest, ModelResponse, dynamic_prompt, wrap_model_call
 
+from src.utils import logger
 from src.agents.common import load_chat_model
 
 
@@ -20,4 +21,5 @@ async def context_based_model(request: ModelRequest, handler: Callable[[ModelReq
     model = load_chat_model(model_spec)
 
     request = request.override(model=model)
+    logger.debug(f"Using model {model_spec} for request {request.messages[-1].content[:200]}")
     return await handler(request)
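
For readability, here is a consolidated sketch of what context_based_model plausibly looks like after this commit. Only the lines visible in the diff above come from the repository; the @wrap_model_call decorator usage, the runtime-context attribute path, and the handler's type annotation are assumptions.

# Sketch only -- everything not visible in the diff above (decorator usage,
# runtime-context attribute path, handler typing) is an assumption, not the
# repository's actual code.
from typing import Awaitable, Callable

from langchain.agents.middleware import ModelRequest, ModelResponse, wrap_model_call

from src.utils import logger
from src.agents.common import load_chat_model


@wrap_model_call
async def context_based_model(
    request: ModelRequest,
    handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelResponse:
    # Assumption: the per-request model spec is carried in the agent's runtime context.
    model_spec = request.runtime.context.model  # hypothetical attribute path
    model = load_chat_model(model_spec)

    request = request.override(model=model)
    logger.debug(f"Using model {model_spec} for request {request.messages[-1].content[:200]}")
    return await handler(request)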

src/agents/deep_agent/graph.py

Lines changed: 6 additions & 20 deletions
@@ -5,7 +5,6 @@
 from deepagents.middleware.subagents import SubAgentMiddleware
 from langchain.agents import create_agent
 from langchain.agents.middleware import ModelRequest, SummarizationMiddleware, TodoListMiddleware, dynamic_prompt
-from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
 
 from src.agents.common import BaseAgent, load_chat_model
 from src.agents.common.middlewares import context_based_model, inject_attachment_context
@@ -18,7 +17,7 @@
 
 research_sub_agent = {
     "name": "research-agent",
-    "description": ("Uses search tools to research deeper questions."),
+    "description": ("Uses search tools to research deeper questions. Writes the research findings into the topic research file."),
     "system_prompt": (
         "You are a dedicated researcher. Your job is to do research based on the user's question."
         "Do thorough research, then reply to the user's question with a detailed answer; only your final answer will be passed on to the user."
@@ -85,18 +84,6 @@ async def get_graph(self, **kwargs):
         model = load_chat_model(context.model)
         tools = await self.get_tools()
 
-        if (
-            model.profile is not None
-            and isinstance(model.profile, dict)
-            and "max_input_tokens" in model.profile
-            and isinstance(model.profile["max_input_tokens"], int)
-        ):  # refers to max_input_tokens on model.dev
-            trigger = ("fraction", 0.85)
-            keep = ("fraction", 0.10)
-        else:
-            trigger = ("tokens", 110000)
-            keep = ("messages", 10)
-
         # Use create_deep_agent to create the deep agent
         graph = create_agent(
             model=model,
@@ -112,26 +99,25 @@ async def get_graph(self, **kwargs):
                     default_tools=tools,
                     subagents=[critique_sub_agent, research_sub_agent],
                     default_middleware=[
+                        context_based_model,  # dynamic model selection
                         TodoListMiddleware(),
                         FilesystemMiddleware(),
                         SummarizationMiddleware(
                             model=model,
-                            trigger=trigger,
-                            keep=keep,
+                            trigger=("tokens", 110000),
+                            keep=("messages", 10),
                             trim_tokens_to_summarize=None,
                         ),
-                        AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
                         PatchToolCallsMiddleware(),
                     ],
                     general_purpose_agent=True,
                 ),
                 SummarizationMiddleware(
                     model=model,
-                    trigger=trigger,
-                    keep=keep,
+                    trigger=("tokens", 110000),
+                    keep=("messages", 10),
                     trim_tokens_to_summarize=None,
                 ),
-                AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
                 PatchToolCallsMiddleware(),
             ],
             checkpointer=await self._get_checkpointer(),
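
The removed branch scaled the summarization thresholds to the model's context window (presumably relative to max_input_tokens, per the deleted comment) and fell back to fixed values otherwise; after this commit both SummarizationMiddleware instances always use the fixed values. A rough, self-contained comparison for an assumed 200k-token context window (hypothetical number, purely illustrative):

# Illustration only -- max_input_tokens here is an assumed example value,
# not taken from any model profile in the repository.
max_input_tokens = 200_000

# Old behavior when the model profile exposed max_input_tokens:
old_trigger = int(max_input_tokens * 0.85)  # ("fraction", 0.85) -> summarize past 170_000 tokens
old_keep = int(max_input_tokens * 0.10)     # ("fraction", 0.10) -> keep roughly 20_000 tokens

# New behavior, now unconditional:
new_trigger = 110_000                       # ("tokens", 110000)
new_keep_messages = 10                      # ("messages", 10) -> keep the 10 most recent messages

print(old_trigger, old_keep, new_trigger, new_keep_messages)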
