
Commit 1a35150

feat(core): add config path parameter and enhance fs cli capabilities
1 parent f0bc2da commit 1a35150

12 files changed: +315, -114 lines

reme/agent/chat/fs_cli.py

Lines changed: 87 additions & 5 deletions
@@ -10,20 +10,102 @@
 class FsCli(BaseReactStream):
     """FsCli agent with system prompt."""
 
-    def __init__(self, working_dir: str, **kwargs):
+    def __init__(
+        self,
+        working_dir: str,
+        summary_params: dict | None = None,
+        compact_params: dict | None = None,
+        **kwargs,
+    ):
         super().__init__(**kwargs)
         self.working_dir: str = working_dir
+        self.summary_params: dict = summary_params or {}
+        self.compact_params: dict = compact_params or {}
+
         self.messages: list[Message] = []
+        self.previous_summary: str = ""
+
+    async def reset_history(self) -> str:
+        """Reset conversation history using summary.
+
+        Summarizes current messages to memory files and clears history.
+        """
+        if not self.messages:
+            self.messages.clear()
+            self.previous_summary = ""
+            return "No history to reset."
 
-    def reset_history(self):
-        """Reset conversation history."""
+        # Import required modules
+        from ..fs import FsSummarizer
+
+        # Summarize current conversation and save to memory files
+        current_date = datetime.now().strftime("%Y-%m-%d")
+        summarizer = FsSummarizer(tools=self.tools, **(self.summary_params or {}))
+
+        result = await summarizer.call(
+            messages=self.messages,
+            date=current_date,
+            service_context=self.service_context,
+        )
+
+        # Clear messages (no previous_summary update, as summarizer saves to files)
         self.messages.clear()
-        return self
+        self.previous_summary = ""
+
+        return f"History saved to memory files and reset. Result: {result.get('answer', 'Done')}"
+
+    async def compact_history(self) -> str:
+        """Compact history then reset.
+
+        First compacts messages if they exceed token limits (generating a summary),
+        then calls reset_history to save to files and clear.
+        """
+        if not self.messages:
+            return "No history to compact."
+
+        # Import required modules
+        from ..fs import FsCompactor
+
+        # Step 1: Compact messages
+        compactor = FsCompactor(**(self.compact_params or {}))
+        compact_result = await compactor.call(
+            messages=self.messages,
+            previous_summary=self.previous_summary,
+            service_context=self.service_context,
+        )
+
+        compacted_messages = compact_result.get("messages", self.messages)
+        is_compacted = compact_result.get("compacted", False)
+
+        if not is_compacted:
+            return "History is within token limits, no compaction needed."
+
+        # Step 2: Extract summary from compacted messages
+        # The first message contains the summary wrapped in compaction_summary_format
+        tokens_before = compact_result.get("tokens_before", 0)
+
+        if compacted_messages and compacted_messages[0].role == Role.USER:
+            # Extract summary content from the first message
+            summary_content = compacted_messages[0].content
+            self.previous_summary = summary_content
+
+        # Step 3: Update messages and call reset_history to save and clear
+        self.messages = compacted_messages
+        reset_result = await self.reset_history()
+
+        return f"History compacted from {tokens_before} tokens. {reset_result}"
 
     async def build_messages(self) -> list[Message]:
         """Build system prompt message."""
         current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S %A")
-        system_prompt = self.prompt_format("system_prompt", workspace_dir=self.working_dir, current_time=current_time)
+
+        system_prompt = self.prompt_format(
+            "system_prompt",
+            workspace_dir=self.working_dir,
+            current_time=current_time,
+            has_previous_summary=bool(self.previous_summary),
+            previous_summary=self.previous_summary or "",
+        )
 
         return [
             Message(role=Role.SYSTEM, content=system_prompt),
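The new summary_params and compact_params dicts are forwarded verbatim to FsSummarizer and FsCompactor, so summarization and compaction behavior can be tuned per FsCli instance. Below is a minimal, hedged sketch of how the new history controls could be wired together; everything except working_dir, summary_params, compact_params, reset_history(), and compact_history() (all introduced in this commit) is an assumption, including whatever extra kwargs BaseReactStream may require.

```python
# Hedged usage sketch, not taken from the repo. Only the new parameters and the
# two new async methods come from this commit; other kwargs are assumptions.
from reme.agent.chat.fs_cli import FsCli


async def run_session(**base_react_kwargs) -> None:
    agent = FsCli(
        working_dir="/tmp/workspace",
        summary_params={},                       # forwarded as-is to FsSummarizer
        compact_params={"force_compact": True},  # forwarded as-is to FsCompactor
        **base_react_kwargs,                     # whatever BaseReactStream needs (assumed)
    )

    # ... chat turns append to agent.messages here ...

    # Compact (summarize + trim) the transcript, then persist it to memory files and clear it.
    print(await agent.compact_history())

    # Or persist and clear without compaction:
    print(await agent.reset_history())
```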

reme/agent/chat/fs_cli.yaml

Lines changed: 7 additions & 0 deletions
@@ -8,6 +8,13 @@ system_prompt: |
   Your working directory is: {workspace_dir}
   Treat this directory as the single global workspace for file operations unless explicitly instructed otherwise.
 
+  [has_previous_summary]## Previous Conversation Summary
+  [has_previous_summary]<previous-summary>
+  [has_previous_summary]{previous_summary}
+  [has_previous_summary]</previous-summary>
+  [has_previous_summary]
+  [has_previous_summary]The above is a summary of our previous conversation. Use it as context to maintain continuity.
+
   ## Session Initialization
 
   Before doing anything else, read these files to orient yourself (don't ask permission):
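The [has_previous_summary] prefix marks lines that should only appear when build_messages passes has_previous_summary=True. ReMe's actual prompt_format implementation is not part of this diff, so the following is purely an illustrative sketch of how such a flag-prefix convention could be resolved; the function name and regex are hypothetical.

```python
# Illustration only: NOT the repo's prompt_format. Shows how a "[flag]..." line
# could be kept or dropped based on a boolean, then formatted normally.
import re


def render_conditional_lines(template: str, **values) -> str:
    """Keep a '[flag]...' line only when values[flag] is truthy, then fill {placeholders}."""
    kept = []
    for line in template.splitlines():
        match = re.match(r"\[(\w+)\](.*)", line)
        if match:
            flag, rest = match.groups()
            if not values.get(flag):
                continue  # drop the whole line when the flag is falsy
            line = rest
        kept.append(line)
    return "\n".join(kept).format(**values)


print(
    render_conditional_lines(
        "[has_previous_summary]## Previous Conversation Summary\n"
        "[has_previous_summary]{previous_summary}\n"
        "Your working directory is: {workspace_dir}",
        has_previous_summary=True,
        previous_summary="User prefers concise answers.",
        workspace_dir="/tmp/workspace",
    )
)
```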

reme/agent/fs/fs_compactor.py

Lines changed: 10 additions & 2 deletions
@@ -17,12 +17,14 @@ def __init__(
         context_window_tokens: int = 128000,
         reserve_tokens: int = 36000,
         keep_recent_tokens: int = 20000,
+        force_compact: bool = False,
         **kwargs,
     ):
         super().__init__(tools=[], **kwargs)
         self.context_window_tokens: int = context_window_tokens
         self.reserve_tokens: int = reserve_tokens
         self.keep_recent_tokens: int = keep_recent_tokens
+        self.force_compact: bool = force_compact
 
     @staticmethod
     def _normalize_messages(messages: list[Message | dict]) -> list[Message]:
@@ -178,16 +180,20 @@ async def execute(self):
         token_count: int = self.token_counter.count_token(original_messages)
         threshold = self.context_window_tokens - self.reserve_tokens
 
-        if token_count < threshold:
+        if not self.force_compact and token_count < threshold:
             logger.info(f"Token count {token_count} below threshold ({threshold}), skipping compaction")
             return {
                 "compacted": False,
                 "tokens_before": token_count,
                 "is_split_turn": False,
                 "messages": original_messages,
+                "summary_content": "",
             }
 
-        logger.info(f"Starting compaction, token count: {token_count}, threshold: {threshold}")
+        if self.force_compact:
+            logger.info(f"Force compaction enabled, token count: {token_count}, threshold: {threshold}")
+        else:
+            logger.info(f"Starting compaction, token count: {token_count}, threshold: {threshold}")
 
         history_prompt_messages = self.build_messages_s1()
 
@@ -198,6 +204,7 @@ async def execute(self):
                 "tokens_before": token_count,
                 "is_split_turn": False,
                 "messages": original_messages,
+                "summary_content": "",
             }
 
         history_summary = await self._generate_summary(history_prompt_messages) if history_prompt_messages else ""
@@ -222,4 +229,5 @@ async def execute(self):
             "tokens_before": token_count,
             "is_split_turn": self.context.is_split_turn,
             "messages": final_messages,
+            "summary_content": summary_content,
         }
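With force_compact=True, the threshold check is bypassed and compaction always runs, and every return path now includes a summary_content field. A hedged sketch of exercising the flag is below; the call signature mirrors how FsCli invokes FsCompactor elsewhere in this commit, while the messages and service_context arguments are placeholders, not real objects.

```python
# Hedged sketch of the new force_compact flag; messages and service_context
# are hypothetical placeholders supplied by the caller.
from reme.agent.fs import FsCompactor


async def demo_force_compact(messages, service_context) -> None:
    compactor = FsCompactor(
        context_window_tokens=128000,
        reserve_tokens=36000,
        keep_recent_tokens=20000,
        force_compact=True,  # new: compact even when below the token threshold
    )

    result = await compactor.call(
        messages=messages,
        previous_summary="",
        service_context=service_context,
    )

    # "summary_content" is now present on every return path (new in this commit).
    if result["compacted"]:
        print(f"compacted from {result['tokens_before']} tokens")
        print(result["summary_content"])
```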

reme/agent/fs/fs_summarizer.yaml

Lines changed: 17 additions & 1 deletion
@@ -17,7 +17,23 @@ user_message_v2: |
   1. Check if {memory_dir}/ exists; if not, create it via bash
   2. Check if {memory_dir}/YYYY-MM-DD.md exists (use actual date)
   3. If file is NEW: Write memories directly (be concise)
-  4. If file EXISTS: Read it first, then UPDATE with new memories (keep concise, merge/deduplicate)
+  4. If file EXISTS:
+     a) Read the existing file content
+     b) Compare conversation history with existing content
+     c) Identify NEW/UPDATED information not yet captured
+     d) Use edit_tool to add/update only the new information (preserve existing content)
+     e) If conversation contains NO new information, skip writing
   5. If NO valuable information to store: Reply with reason and [SILENT]
 
+  IMPORTANT for updates:
+  - Only add information that is NOT already in the file
+  - Preserve all existing entries
+  - Merge duplicate information intelligently
+  - Use edit_tool for surgical updates, not write_tool (which overwrites)
+
+  Example of what counts as NEW information:
+  - Existing: "Alice: Software engineer"
+  - Conversation: "Alice loves Python and AI projects"
+  - Action: ADD "Enjoys Python programming and AI project work" to Alice's entry
+
   Store durable memories. Keep entries concise and well-organized.

reme/config/default.yaml

Lines changed: 2 additions & 2 deletions
@@ -20,8 +20,8 @@ flows:
 llms:
   default:
     backend: openai
-    # model_name: qwen3-30b-a3b-instruct-2507
-    model_name: qwen3-30b-a3b-thinking-2507
+    model_name: qwen3-30b-a3b-instruct-2507
+    # model_name: qwen3-30b-a3b-thinking-2507
     request_interval: 1
     # temperature: 0.0001
 

reme/config/fs.yaml

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+backend: cmd
+
+llms:
+  default:
+    backend: openai
+    model_name: qwen3-30b-a3b-instruct-2507
+    # model_name: qwen3-30b-a3b-thinking-2507
+    request_interval: 1
+    # temperature: 0.0001
+
+embedding_models:
+  default:
+    backend: openai
+    model_name: text-embedding-v4
+    dimensions: 1024
+
+memory_stores:
+  default:
+    backend: sqlite
+    store_name: test_hybrid
+    embedding_model: default
+    fts_enabled: true
+    snippet_max_chars: 700
+
+token_counters:
+  default:
+    backend: base
+
+  hf:
+    backend: hf
+    model_name: Qwen/Qwen3-Coder-30B-A3B-Instruct
+    use_mirror: true
+
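The new fs.yaml bundles the LLM, embedding, SQLite memory-store, and token-counter settings used by the fs CLI flow. ReMe loads configs through its own parser (ReMeConfigParser, not shown here), so the snippet below is only a structural sanity check of the file with plain PyYAML, not how the framework reads it.

```python
# Illustration only: inspect the layout of the new config with PyYAML.
import yaml

with open("reme/config/fs.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

assert cfg["backend"] == "cmd"
print(cfg["memory_stores"]["default"]["store_name"])  # test_hybrid
print(cfg["token_counters"]["hf"]["model_name"])      # Qwen/Qwen3-Coder-30B-A3B-Instruct
```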

reme/core/application.py

Lines changed: 2 additions & 1 deletion
@@ -24,6 +24,7 @@ def __init__(
         llm_api_base: str | None = None,
         embedding_api_key: str | None = None,
         embedding_api_base: str | None = None,
+        config_path: str | None = None,
         enable_logo: bool = True,
         log_to_console: bool = True,
         parser: type[PydanticConfigParser] | None = None,
@@ -43,7 +44,7 @@ def __init__(
             embedding_api_base=embedding_api_base,
             service_config=None,
             parser=parser,
-            config_path=None,
+            config_path=config_path,
             enable_logo=enable_logo,
             log_to_console=log_to_console,
             default_llm_config=default_llm_config,

reme/reme.py

Lines changed: 3 additions & 0 deletions
@@ -51,6 +51,7 @@ def __init__(
         llm_api_base: str | None = None,
         embedding_api_key: str | None = None,
         embedding_api_base: str | None = None,
+        config_path: str = "default",
         enable_logo: bool = True,
         log_to_console: bool = True,
         default_llm_config: dict | None = None,
@@ -71,6 +72,7 @@ def __init__(
             llm_api_base: API base for LLM provider
             embedding_api_key: API key for embedding provider
             embedding_api_base: API base for embedding provider
+            config_path: Path to config file
             enable_logo: Enable logo
             log_to_console: Log to console
             default_llm_config: LLM configuration
@@ -102,6 +104,7 @@ def __init__(
             llm_api_base=llm_api_base,
             embedding_api_key=embedding_api_key,
             embedding_api_base=embedding_api_base,
+            config_path=config_path,
             enable_logo=enable_logo,
             log_to_console=log_to_console,
             parser=ReMeConfigParser,
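With config_path now accepted here and forwarded through to the core Application, a non-default configuration such as the new reme/config/fs.yaml can be selected at construction time. The sketch below is hedged: the import location and whether a bare name ("fs") or a full file path is expected are assumptions inferred from the default value "default", not confirmed by this diff.

```python
# Hedged sketch: the diff only shows config_path being threaded through to the
# core Application. The import below and the "fs" resolution are assumptions.
from reme import ReMe  # assumed import location for the class defined in reme/reme.py

app = ReMe(
    config_path="fs",   # assumed to resolve to reme/config/fs.yaml
    enable_logo=False,
    log_to_console=True,
)
```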
