
Commit 7c16d36

feat: reorganize prompt with reference in user content
2 parents: 98dbf8a + 996e4a4

File tree

10 files changed: 55 additions & 120 deletions


README.md

Lines changed: 1 addition & 0 deletions
@@ -252,6 +252,7 @@ MemOS is licensed under the [Apache 2.0 License](./LICENSE).
 Stay up to date with the latest MemOS announcements, releases, and community highlights.


+- **2025-09-10** - 🎉 *MemOS v1.0.1 (Group Q&A Bot)*: Group Q&A bot based on MemOS Cube, updated KV-Cache performance comparison data across different GPU deployment schemes, optimized test benchmarks and statistics, added plaintext memory Reranker sorting, optimized plaintext memory hallucination issues, and Playground version updates. [Try PlayGround](https://memos-playground.openmem.net/login/)
 - **2025-08-07** - 🎉 *MemOS v1.0.0 (MemCube Release)*: First MemCube with word game demo, LongMemEval evaluation, BochaAISearchRetriever integration, NebulaGraph support, enhanced search capabilities, and official Playground launch.
 - **2025-07-29** – 🎉 *MemOS v0.2.2 (Nebula Update)*: Internet search+Nebula DB integration, refactored memory scheduler, KV Cache stress tests, MemCube Cookbook release (CN/EN), and 4b/1.7b/0.6b memory ops models.
 - **2025-07-21** – 🎉 *MemOS v0.2.1 (Neo Release)*: Lightweight Neo version with plaintext+KV Cache functionality, Docker/multi-tenant support, MCP expansion, and new Cookbook/Mud game examples.

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 ##############################################################################

 name = "MemoryOS"
-version = "1.0.0"
+version = "1.0.1"
 description = "Intelligence Begins with Memory"
 license = {text = "Apache-2.0"}
 readme = "README.md"

src/memos/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-__version__ = "1.0.0"
+__version__ = "1.0.1"

 from memos.configs.mem_cube import GeneralMemCubeConfig
 from memos.configs.mem_os import MOSConfig

src/memos/api/config.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ class APIConfig:
     def get_openai_config() -> dict[str, Any]:
         """Get OpenAI configuration."""
         return {
-            "model_name_or_path": os.getenv("MOS_OPENAI_MODEL", "gpt-4o-mini"),
+            "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o-mini"),
             "temperature": float(os.getenv("MOS_CHAT_TEMPERATURE", "0.8")),
             "max_tokens": int(os.getenv("MOS_MAX_TOKENS", "1024")),
             "top_p": float(os.getenv("MOS_TOP_P", "0.9")),

src/memos/api/context/context.py

Lines changed: 6 additions & 0 deletions
@@ -6,6 +6,7 @@
 and request isolation.
 """

+import os
 import uuid

 from collections.abc import Callable
@@ -117,6 +118,11 @@ def require_context() -> RequestContext:
 _trace_id_getter: TraceIdGetter | None = None


+def generate_trace_id() -> str:
+    """Generate a random trace_id."""
+    return os.urandom(16).hex()
+
+
 def set_trace_id_getter(getter: TraceIdGetter) -> None:
     """
     Set a custom trace_id getter function.
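
generate_trace_id now lives beside the other context helpers, so both the middleware and the logger can import it from one place. A minimal usage sketch; the example value is illustrative, and the checks simply restate what os.urandom(16).hex() produces:

```python
from memos.api.context.context import generate_trace_id

# 16 random bytes rendered as hex -> a 32-character lowercase hex string,
# e.g. "9f86d081884c7d659a2feaa0c55ad015" (value shown is illustrative).
trace_id = generate_trace_id()
assert len(trace_id) == 32
assert all(c in "0123456789abcdef" for c in trace_id)
```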

src/memos/api/middleware/request_context.py

Lines changed: 1 addition & 7 deletions
@@ -3,25 +3,19 @@
 """

 import logging
-import os

 from collections.abc import Callable

 from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.requests import Request
 from starlette.responses import Response

-from memos.api.context.context import RequestContext, set_request_context
+from memos.api.context.context import RequestContext, generate_trace_id, set_request_context


 logger = logging.getLogger(__name__)


-def generate_trace_id() -> str:
-    """Generate a random trace_id."""
-    return os.urandom(16).hex()
-
-
 def extract_trace_id_from_headers(request: Request) -> str | None:
     """Extract trace_id from various possible headers with priority: g-trace-id > x-trace-id > trace-id."""
     trace_id = request.headers.get("g-trace-id")
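
The hunk ends right after the first header lookup; as a standalone illustration of the priority documented in the docstring (g-trace-id, then x-trace-id, then trace-id), here is a plain-dict sketch rather than the repository's Request-based implementation:

```python
def pick_trace_id(headers: dict[str, str]) -> str | None:
    """Return the first non-empty trace id, honoring g-trace-id > x-trace-id > trace-id."""
    for name in ("g-trace-id", "x-trace-id", "trace-id"):
        value = headers.get(name)
        if value:
            return value
    return None


print(pick_trace_id({"x-trace-id": "abc123"}))  # -> "abc123"
print(pick_trace_id({}))                        # -> None
```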

src/memos/api/product_api.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 app = FastAPI(
     title="MemOS Product REST APIs",
     description="A REST API for managing multiple users with MemOS Product.",
-    version="1.0.0",
+    version="1.0.1",
 )

 # Add request context middleware (must be added first)

src/memos/log.py

Lines changed: 7 additions & 101 deletions
@@ -1,19 +1,13 @@
-import atexit
 import logging
-import os
-import threading

 from logging.config import dictConfig
 from pathlib import Path
 from sys import stdout

-import requests
-
 from dotenv import load_dotenv

 from memos import settings
-from memos.api.context.context import get_current_trace_id
-from memos.api.context.context_thread import ContextThreadPoolExecutor
+from memos.api.context.context import generate_trace_id, get_current_trace_id


 # Load environment variables
@@ -39,95 +33,12 @@ class TraceIDFilter(logging.Filter):
     def filter(self, record):
         try:
             trace_id = get_current_trace_id()
-            record.trace_id = trace_id if trace_id else "no-trace-id"
+            record.trace_id = trace_id if trace_id else generate_trace_id()
         except Exception:
-            record.trace_id = "no-trace-id"
+            record.trace_id = generate_trace_id()
         return True


-class CustomLoggerRequestHandler(logging.Handler):
-    _instance = None
-    _lock = threading.Lock()
-
-    def __new__(cls):
-        if cls._instance is None:
-            with cls._lock:
-                if cls._instance is None:
-                    cls._instance = super().__new__(cls)
-                    cls._instance._initialized = False
-                    cls._instance._executor = None
-                    cls._instance._session = None
-                    cls._instance._is_shutting_down = None
-        return cls._instance
-
-    def __init__(self):
-        """Initialize handler with minimal setup"""
-        if not self._initialized:
-            super().__init__()
-            workers = int(os.getenv("CUSTOM_LOGGER_WORKERS", "2"))
-            self._executor = ContextThreadPoolExecutor(
-                max_workers=workers, thread_name_prefix="log_sender"
-            )
-            self._is_shutting_down = threading.Event()
-            self._session = requests.Session()
-            self._initialized = True
-            atexit.register(self._cleanup)
-
-    def emit(self, record):
-        """Process log records of INFO or ERROR level (non-blocking)"""
-        if os.getenv("CUSTOM_LOGGER_URL") is None or self._is_shutting_down.is_set():
-            return
-
-        try:
-            trace_id = get_current_trace_id() or "no-trace-id"
-            self._executor.submit(self._send_log_sync, record.getMessage(), trace_id)
-        except Exception as e:
-            if not self._is_shutting_down.is_set():
-                print(f"Error sending log: {e}")
-
-    def _send_log_sync(self, message, trace_id):
-        """Send log message synchronously in a separate thread"""
-        try:
-            logger_url = os.getenv("CUSTOM_LOGGER_URL")
-            token = os.getenv("CUSTOM_LOGGER_TOKEN")
-
-            headers = {"Content-Type": "application/json"}
-            post_content = {"message": message, "trace_id": trace_id}
-
-            # Add auth token if exists
-            if token:
-                headers["Authorization"] = f"Bearer {token}"
-
-            # Add traceId to headers for consistency
-            headers["traceId"] = trace_id
-
-            # Add custom attributes from env
-            for key, value in os.environ.items():
-                if key.startswith("CUSTOM_LOGGER_ATTRIBUTE_"):
-                    attribute_key = key[len("CUSTOM_LOGGER_ATTRIBUTE_") :].lower()
-                    post_content[attribute_key] = value
-
-            self._session.post(logger_url, headers=headers, json=post_content, timeout=5)
-        except Exception:
-            # Silently ignore errors to avoid affecting main application
-            pass
-
-    def _cleanup(self):
-        """Clean up resources during program exit"""
-        if not self._initialized:
-            return
-
-        self._is_shutting_down.set()
-        try:
-            self._executor.shutdown(wait=False)
-            self._session.close()
-        except Exception as e:
-            print(f"Error during cleanup: {e}")
-
-    def close(self):
-        """Override close to prevent premature shutdown"""
-
-
 LOGGING_CONFIG = {
     "version": 1,
     "disable_existing_loggers": False,
@@ -151,7 +62,7 @@ def close(self):
             "level": selected_log_level,
             "class": "logging.StreamHandler",
             "stream": stdout,
-            "formatter": "simplified",
+            "formatter": "no_datetime",
             "filters": ["package_tree_filter", "trace_id_filter"],
         },
         "file": {
@@ -160,18 +71,13 @@ def close(self):
             "filename": _setup_logfile(),
             "maxBytes": 1024**2 * 10,
             "backupCount": 10,
-            "formatter": "simplified",
+            "formatter": "standard",
             "filters": ["trace_id_filter"],
         },
-        "custom_logger": {
-            "level": selected_log_level,
-            "class": "memos.log.CustomLoggerRequestHandler",
-            "formatter": "simplified",
-        },
     },
     "root": {  # Root logger handles all logs
-        "level": selected_log_level,
-        "handlers": ["console", "file", "custom_logger"],
+        "level": logging.DEBUG if settings.DEBUG else logging.INFO,
+        "handlers": ["console", "file"],
     },
     "loggers": {
        "memos": {

src/memos/mem_os/product.py

Lines changed: 35 additions & 7 deletions
@@ -742,16 +742,39 @@ def run_async_in_thread():
         thread.start()

     def _filter_memories_by_threshold(
-        self, memories: list[TextualMemoryItem], threshold: float = 0.30, min_num: int = 3
+        self,
+        memories: list[TextualMemoryItem],
+        threshold: float = 0.30,
+        min_num: int = 3,
+        memory_type: Literal["OuterMemory"] = "OuterMemory",
     ) -> list[TextualMemoryItem]:
         """
-        Filter memories by threshold.
+        Filter memories by threshold and type, at least min_num memories for Non-OuterMemory.
+        Args:
+            memories: list[TextualMemoryItem],
+            threshold: float,
+            min_num: int,
+            memory_type: Literal["OuterMemory"],
+        Returns:
+            list[TextualMemoryItem]
         """
         sorted_memories = sorted(memories, key=lambda m: m.metadata.relativity, reverse=True)
-        filtered = [m for m in sorted_memories if m.metadata.relativity >= threshold]
+        filtered_person = [m for m in memories if m.metadata.memory_type != memory_type]
+        filtered_outer = [m for m in memories if m.metadata.memory_type == memory_type]
+        filtered = []
+        per_memory_count = 0
+        for m in sorted_memories:
+            if m.metadata.relativity >= threshold:
+                if m.metadata.memory_type != memory_type:
+                    per_memory_count += 1
+                filtered.append(m)
         if len(filtered) < min_num:
-            filtered = sorted_memories[:min_num]
-        return filtered
+            filtered = filtered_person[:min_num] + filtered_outer[:min_num]
+        else:
+            if per_memory_count < min_num:
+                filtered += filtered_person[per_memory_count:min_num]
+        filtered_memory = sorted(filtered, key=lambda m: m.metadata.relativity, reverse=True)
+        return filtered_memory

     def register_mem_cube(
         self,
@@ -954,12 +977,17 @@ def chat(
             internet_search=internet_search,
             moscube=moscube,
         )["text_mem"]
+
         memories_list = []
         if memories_result:
             memories_list = memories_result[0]["memories"]
             memories_list = self._filter_memories_by_threshold(
                 memories_list, threshold)
-
+            new_memories_list = []
+            for m in memories_list:
+                m.metadata.embedding = []
+                new_memories_list.append(m)
+            memories_list = new_memories_list
         # Build base system prompt without memory
         system_prompt = self._build_base_system_prompt(base_prompt,
             mode="base")
@@ -999,7 +1027,7 @@ def chat_with_references(
         user_id: str,
         cube_id: str | None = None,
         history: MessageList | None = None,
-        top_k: int = 10,
+        top_k: int = 20,
         internet_search: bool = False,
         moscube: bool = False,
     ) -> Generator[str, None, None]:
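
The reworked filter keeps everything above the relativity threshold, counts how many non-OuterMemory items survive, tops that count up toward min_num from the unfiltered list, and falls back to the first min_num of each kind when fewer than min_num items pass at all. A standalone restatement with (relativity, memory_type) tuples standing in for TextualMemoryItem, illustration only:

```python
def filter_by_threshold(memories, threshold=0.30, min_num=3, outer="OuterMemory"):
    """memories: list of (relativity, memory_type) tuples; mirrors the rule above."""
    ranked = sorted(memories, key=lambda m: m[0], reverse=True)
    person = [m for m in memories if m[1] != outer]     # non-OuterMemory, input order
    outer_mems = [m for m in memories if m[1] == outer]  # OuterMemory, input order

    kept, person_kept = [], 0
    for m in ranked:
        if m[0] >= threshold:
            if m[1] != outer:
                person_kept += 1
            kept.append(m)

    if len(kept) < min_num:
        # Hardly anything passed: take the first min_num of each kind instead.
        kept = person[:min_num] + outer_mems[:min_num]
    elif person_kept < min_num:
        # Top up non-OuterMemory items so at least min_num of them survive.
        kept += person[person_kept:min_num]

    return sorted(kept, key=lambda m: m[0], reverse=True)


print(filter_by_threshold([(0.9, "OuterMemory"), (0.5, "UserMemory"), (0.1, "UserMemory")]))
# -> [(0.9, 'OuterMemory'), (0.5, 'UserMemory'), (0.1, 'UserMemory')]
```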

src/memos/templates/mos_prompts.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@

 MEMOS_PRODUCT_BASE_PROMPT = """
 # System
-- Role: You are MemOS🧚, nickname Little M(小忆🧚) — an advanced Memory Operating System assistant by MemTensor, a Shanghai-based AI research company advised by an academician of the Chinese Academy of Sciences.
+- Role: You are MemOS🧚, nickname Little M(小忆🧚) — an advanced Memory Operating System assistant by 记忆张量(MemTensor Technology Co., Ltd.), a Shanghai-based AI research company advised by an academician of the Chinese Academy of Sciences.
 - Date: {date}

 - Mission & Values: Uphold MemTensor’s vision of "low cost, low hallucination, high generalization, exploring AI development paths aligned with China’s national context and driving the adoption of trustworthy AI technologies. MemOS’s mission is to give large language models (LLMs) and autonomous agents **human-like long-term memory**, turning memory from a black-box inside model weights into a **manageable, schedulable, and auditable** core resource.
