
Commit 679fafe

chore(version): bump version to 0.3.0.0a4 and add logging

1 parent e70d19a

8 files changed (+29 lines added, -23 lines removed)


reme/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
     "ReMeFs",
 ]

-__version__ = "0.3.0.0a3"
+__version__ = "0.3.0.0a4"


 """

reme/agent/chat/fs_cli.py
Lines changed: 5 additions & 0 deletions

@@ -3,6 +3,8 @@
 from datetime import datetime
 from pathlib import Path

+from loguru import logger
+
 from ...core.enumeration import Role, ChunkEnum
 from ...core.op import BaseReactStream
 from ...core.schema import Message, StreamChunk
@@ -164,6 +166,9 @@ async def execute(self):
         _ = await self.compact(force_compact=False)

         messages = await self.build_messages()
+        for i, message in enumerate(messages):
+            role = message.name or message.role
+            logger.info(f"[{self.__class__.__name__}] role={role} {message.simple_dump(as_dict=False)}")

         t_tools, messages, success = await self.react(messages, self.tools)

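The new loop logs every prompt message just before the ReAct call, preferring a message's explicit name over its bare role. A minimal sketch of the pattern with a hypothetical stand-in for reme.core.schema.Message (only the fields the loop touches are modeled; the real simple_dump lives in the repo):

    from loguru import logger

    class Message:  # hypothetical stand-in, not the real schema class
        def __init__(self, role, content, name=None):
            self.role, self.content, self.name = role, content, name

        def simple_dump(self, as_dict=True):
            dump = {"role": self.role, "content": self.content}
            return dump if as_dict else str(dump)

    messages = [Message("system", "You are helpful."), Message("user", "hi", name="alice")]
    for i, message in enumerate(messages):
        role = message.name or message.role  # explicit name wins over the raw role
        logger.info(f"[FsCli] role={role} {message.simple_dump(as_dict=False)}")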

reme/core/file_watcher/full_file_watcher.py
Lines changed: 5 additions & 5 deletions

@@ -6,6 +6,7 @@

 import asyncio
 import os
+from pathlib import Path

 from loguru import logger
 from watchfiles import Change
@@ -27,18 +28,17 @@ def __init__(self, **kwargs):

     @staticmethod
     async def _build_file_metadata(path: str) -> FileMetadata:
+        file_path = Path(path)
+
         def _read_file_sync():
-            stat_t = os.stat(path)
-            with open(path, "r", encoding="utf-8") as f:
-                content_t = f.read()
-            return stat_t, content_t
+            return file_path.stat(), file_path.read_text(encoding="utf-8")

         stat, content = await asyncio.to_thread(_read_file_sync)
         return FileMetadata(
             hash=hash_text(content),
             mtime_ms=stat.st_mtime * 1000,
             size=stat.st_size,
-            path=path,
+            path=str(file_path.absolute()),
             content=content,
         )

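The refactor swaps os.stat plus open/read for their pathlib equivalents, while keeping the important property that the blocking I/O runs in a worker thread via asyncio.to_thread so the event loop stays responsive. A self-contained sketch of the same pattern (the function name here is illustrative, not from the repo):

    import asyncio
    import os
    from pathlib import Path

    async def read_file_off_loop(path: str) -> tuple[os.stat_result, str]:
        file_path = Path(path)

        def _read_file_sync():
            # stat() and read_text() both block; run them off the event loop
            return file_path.stat(), file_path.read_text(encoding="utf-8")

        # asyncio.to_thread (Python 3.9+) runs the callable in a thread pool
        return await asyncio.to_thread(_read_file_sync)

    # Usage: stat, content = asyncio.run(read_file_off_loop("README.md"))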

reme/core/llm/lite_llm.py
Lines changed: 2 additions & 2 deletions

@@ -101,10 +101,10 @@ async def _stream_chat(

            delta = chunk.choices[0].delta

-           if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
+           if hasattr(delta, "reasoning_content") and delta.reasoning_content:
                yield StreamChunk(chunk_type=ChunkEnum.THINK, chunk=delta.reasoning_content)

-           if delta.content is not None:
+           if delta.content:
                yield StreamChunk(chunk_type=ChunkEnum.ANSWER, chunk=delta.content)

            if hasattr(delta, "tool_calls") and delta.tool_calls is not None:

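Switching from "is not None" to a plain truthiness test also filters out empty strings, so the stream no longer yields empty THINK or ANSWER chunks; the same two-line change is applied to the sync and OpenAI variants below. The difference in one snippet:

    delta_content = ""  # providers sometimes send empty delta strings
    if delta_content is not None:
        print("old check: an empty ANSWER chunk would be yielded")
    if delta_content:
        print("new check: never reached for empty or None content")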

reme/core/llm/lite_llm_sync.py
Lines changed: 2 additions & 2 deletions

@@ -33,10 +33,10 @@ def _stream_chat_sync(

            delta = chunk.choices[0].delta

-           if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
+           if hasattr(delta, "reasoning_content") and delta.reasoning_content:
                yield StreamChunk(chunk_type=ChunkEnum.THINK, chunk=delta.reasoning_content)

-           if delta.content is not None:
+           if delta.content:
                yield StreamChunk(chunk_type=ChunkEnum.ANSWER, chunk=delta.content)

            if hasattr(delta, "tool_calls") and delta.tool_calls is not None:

reme/core/llm/openai_llm.py
Lines changed: 2 additions & 2 deletions

@@ -95,10 +95,10 @@ async def _stream_chat(

            delta = chunk.choices[0].delta

-           if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
+           if hasattr(delta, "reasoning_content") and delta.reasoning_content:
                yield StreamChunk(chunk_type=ChunkEnum.THINK, chunk=delta.reasoning_content)

-           if delta.content is not None:
+           if delta.content:
                yield StreamChunk(chunk_type=ChunkEnum.ANSWER, chunk=delta.content)

            if delta.tool_calls is not None:

reme/core/llm/openai_llm_sync.py
Lines changed: 2 additions & 2 deletions

@@ -37,10 +37,10 @@ def _stream_chat_sync(

            delta = chunk.choices[0].delta

-           if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
+           if hasattr(delta, "reasoning_content") and delta.reasoning_content:
                yield StreamChunk(chunk_type=ChunkEnum.THINK, chunk=delta.reasoning_content)

-           if delta.content is not None:
+           if delta.content:
                yield StreamChunk(chunk_type=ChunkEnum.ANSWER, chunk=delta.content)

            if delta.tool_calls is not None:

reme/reme_fs.py
Lines changed: 10 additions & 9 deletions

@@ -293,33 +293,34 @@ async def chat(q: str) -> AsyncGenerator[StreamChunk, None]:
         while True:
             try:
                 # Get user input (async)
-                user_input = await session.prompt_async("You: ", default="")
-                if not user_input.strip():
+                user_input = await session.prompt_async("You: ")
+                user_input = user_input.strip()
+                if not user_input:
                     continue

                 # Handle commands
-                if user_input.strip() == "/exit":
+                if user_input == "/exit":
                     break

-                if user_input.strip() == "/new":
+                if user_input == "/new":
                     result = await fs_cli.reset()
                     print(f"{result}\nConversation reset\n")
                     continue

-                if user_input.strip() == "/compact":
+                if user_input == "/compact":
                     result = await fs_cli.compact(force_compact=True)
                     print(f"{result}\nHistory compacted.\n")
                     continue

-                if user_input.strip() == "/clear":
+                if user_input == "/clear":
                     fs_cli.messages.clear()
                     print("History cleared.\n")
                     continue

-                if user_input.strip() == "/help":
+                if user_input == "/help":
                     print("\nCommands:")
-                    for command in self.commands:
-                        print(f"  {command}")
+                    for command, description in self.commands.items():
+                        print(f"  {command}: {description}")
                     continue

                 # Stream processing state

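The input is now stripped once up front instead of calling .strip() in every branch, and the /help handler treats self.commands as a mapping from command to description rather than a bare list, so the help output explains each command. A runnable sketch of the new /help loop with hypothetical descriptions (the real text lives on the CLI object):

    commands = {  # hypothetical descriptions, not copied from the repo
        "/exit": "quit the chat loop",
        "/new": "reset the conversation",
        "/compact": "force-compact the chat history",
        "/clear": "clear the in-memory message list",
        "/help": "show this command list",
    }

    print("\nCommands:")
    for command, description in commands.items():
        print(f"  {command}: {description}")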
