Skip to content

Commit b3deaeb

Browse files
committed
fix: conflict
2 parents f5f8258 + 3f87a63 commit b3deaeb

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

48 files changed

+1635
-422
lines changed

examples/mem_scheduler/memos_w_scheduler.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,29 +1,28 @@
1+
import re
12
import shutil
23
import sys
34

5+
from datetime import datetime
46
from pathlib import Path
57
from queue import Queue
8+
69
from memos.configs.mem_cube import GeneralMemCubeConfig
710
from memos.configs.mem_os import MOSConfig
8-
from datetime import datetime
9-
import re
10-
1111
from memos.configs.mem_scheduler import AuthConfig
1212
from memos.log import get_logger
1313
from memos.mem_cube.general import GeneralMemCube
1414
from memos.mem_os.main import MOS
15+
from memos.mem_scheduler.general_scheduler import GeneralScheduler
1516
from memos.mem_scheduler.schemas.general_schemas import (
16-
QUERY_LABEL,
17-
ANSWER_LABEL,
1817
ADD_LABEL,
18+
ANSWER_LABEL,
19+
MEM_ARCHIVE_LABEL,
1920
MEM_ORGANIZE_LABEL,
2021
MEM_UPDATE_LABEL,
21-
MEM_ARCHIVE_LABEL,
22-
NOT_APPLICABLE_TYPE,
22+
QUERY_LABEL,
2323
)
2424
from memos.mem_scheduler.schemas.message_schemas import ScheduleLogForWebItem
2525
from memos.mem_scheduler.utils.filter_utils import transform_name_to_key
26-
from memos.mem_scheduler.general_scheduler import GeneralScheduler
2726

2827

2928
FILE_PATH = Path(__file__).absolute()

src/memos/api/handlers/chat_handler.py

Lines changed: 386 additions & 60 deletions
Large diffs are not rendered by default.

src/memos/api/handlers/component_init.py

Lines changed: 38 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
from memos.api.config import APIConfig
1313
from memos.api.handlers.config_builders import (
14+
build_chat_llm_config,
1415
build_embedder_config,
1516
build_graph_db_config,
1617
build_internet_retriever_config,
@@ -77,6 +78,38 @@ def _get_default_memory_size(cube_config: Any) -> dict[str, int]:
7778
}
7879

7980

81+
def _init_chat_llms(chat_llm_configs: list[dict]) -> dict[str, Any]:
82+
"""
83+
Initialize chat language models from configuration.
84+
85+
Args:
86+
chat_llm_configs: List of chat LLM configuration dictionaries
87+
88+
Returns:
89+
Dictionary mapping model names to initialized LLM instances
90+
"""
91+
92+
def _list_models(client):
93+
try:
94+
models = (
95+
[model.id for model in client.models.list().data]
96+
if client.models.list().data
97+
else client.models.list().models
98+
)
99+
except Exception as e:
100+
logger.error(f"Error listing models: {e}")
101+
models = []
102+
return models
103+
104+
model_name_instrance_maping = {}
105+
for cfg in chat_llm_configs:
106+
llm = LLMFactory.from_config(cfg["config_class"])
107+
if cfg["support_models"]:
108+
for model_name in cfg["support_models"]:
109+
model_name_instrance_maping[model_name] = llm
110+
return model_name_instrance_maping
111+
112+
80113
def init_server() -> dict[str, Any]:
81114
"""
82115
Initialize all server components and configurations.
@@ -104,6 +137,7 @@ def init_server() -> dict[str, Any]:
104137
# Build component configurations
105138
graph_db_config = build_graph_db_config()
106139
llm_config = build_llm_config()
140+
chat_llm_config = build_chat_llm_config()
107141
embedder_config = build_embedder_config()
108142
mem_reader_config = build_mem_reader_config()
109143
reranker_config = build_reranker_config()
@@ -123,13 +157,16 @@ def init_server() -> dict[str, Any]:
123157
else None
124158
)
125159
llm = LLMFactory.from_config(llm_config)
160+
chat_llms = _init_chat_llms(chat_llm_config)
126161
embedder = EmbedderFactory.from_config(embedder_config)
127162
mem_reader = MemReaderFactory.from_config(mem_reader_config)
128163
reranker = RerankerFactory.from_config(reranker_config)
129164
internet_retriever = InternetRetrieverFactory.from_config(
130165
internet_retriever_config, embedder=embedder
131166
)
132167

168+
# Initialize chat llms
169+
133170
logger.debug("Core components instantiated")
134171

135172
# Initialize memory manager
@@ -234,7 +271,6 @@ def init_server() -> dict[str, Any]:
234271
tree_mem: TreeTextMemory = naive_mem_cube.text_mem
235272
searcher: Searcher = tree_mem.get_searcher(
236273
manual_close_internet=os.getenv("ENABLE_INTERNET", "true").lower() == "false",
237-
moscube=False,
238274
)
239275
logger.debug("Searcher created")
240276

@@ -276,6 +312,7 @@ def init_server() -> dict[str, Any]:
276312
"graph_db": graph_db,
277313
"mem_reader": mem_reader,
278314
"llm": llm,
315+
"chat_llms": chat_llms,
279316
"embedder": embedder,
280317
"reranker": reranker,
281318
"internet_retriever": internet_retriever,

src/memos/api/handlers/config_builders.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
a configuration dictionary using the appropriate ConfigFactory.
77
"""
88

9+
import json
910
import os
1011

1112
from typing import Any
@@ -81,6 +82,32 @@ def build_llm_config() -> dict[str, Any]:
8182
)
8283

8384

85+
def build_chat_llm_config() -> list[dict[str, Any]]:
    """
    Build chat LLM configurations from the CHAT_MODEL_LIST environment variable.

    Returns:
        List of dictionaries, one per configured chat model group, each with:
            - "config_class": a validated LLMConfigFactory instance
            - "support_models": list of model names served by that config, or None

    Raises:
        ValueError: If CHAT_MODEL_LIST is not set or is not valid JSON.
    """
    raw = os.getenv("CHAT_MODEL_LIST")
    if raw is None:
        # Fail fast with a clear message instead of the opaque TypeError that
        # json.loads(None) would raise.
        raise ValueError("CHAT_MODEL_LIST environment variable is not set")
    try:
        configs = json.loads(raw)
    except json.JSONDecodeError as e:
        raise ValueError(f"CHAT_MODEL_LIST is not valid JSON: {e}") from e
    return [
        {
            "config_class": LLMConfigFactory.model_validate(
                {
                    "backend": cfg.get("backend", "openai"),
                    # Forward every key except metadata as the backend config;
                    # an empty entry falls back to the default OpenAI config.
                    "config": (
                        {k: v for k, v in cfg.items() if k not in ["backend", "support_models"]}
                        if cfg
                        else APIConfig.get_openai_config()
                    ),
                }
            ),
            "support_models": cfg.get("support_models", None),
        }
        for cfg in configs
    ]
109+
110+
84111
def build_embedder_config() -> dict[str, Any]:
85112
"""
86113
Build embedder configuration.

src/memos/api/handlers/memory_handler.py

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,14 @@
66

77
from typing import Any, Literal
88

9-
from memos.api.product_models import MemoryResponse
9+
from memos.api.handlers.formatters_handler import format_memory_item
10+
from memos.api.product_models import (
11+
DeleteMemoryRequest,
12+
DeleteMemoryResponse,
13+
GetMemoryRequest,
14+
GetMemoryResponse,
15+
MemoryResponse,
16+
)
1017
from memos.log import get_logger
1118
from memos.mem_os.utils.format_utils import (
1219
convert_graph_to_tree_forworkmem,
@@ -149,3 +156,37 @@ def handle_get_subgraph(
149156
except Exception as e:
150157
logger.error(f"Failed to get subgraph: {e}", exc_info=True)
151158
raise
159+
160+
161+
def handle_get_memories(get_mem_req: GetMemoryRequest, naive_mem_cube: Any) -> GetMemoryResponse:
    """Fetch all text memories for the cube plus preference memories matching the request."""
    # TODO: Implement get memory with filter
    text_nodes = naive_mem_cube.text_mem.get_all(user_name=get_mem_req.mem_cube_id)["nodes"]
    # Only include identifiers that were actually supplied on the request.
    pref_filter: dict[str, Any] = {
        key: value
        for key, value in (
            ("user_id", get_mem_req.user_id),
            ("mem_cube_id", get_mem_req.mem_cube_id),
        )
        if value is not None
    }
    pref_items = naive_mem_cube.pref_mem.get_memory_by_filter(pref_filter)
    formatted_prefs = [format_memory_item(item) for item in pref_items]
    return GetMemoryResponse(
        message="Memories retrieved successfully",
        data={"text_mem": text_nodes, "pref_mem": formatted_prefs},
    )
177+
178+
179+
def handle_delete_memories(
    delete_mem_req: DeleteMemoryRequest, naive_mem_cube: Any
) -> DeleteMemoryResponse:
    """
    Delete the requested memory ids from both the text and preference stores.

    Args:
        delete_mem_req: Request carrying the list of memory ids to delete.
        naive_mem_cube: Memory cube exposing ``text_mem`` and ``pref_mem`` stores.

    Returns:
        DeleteMemoryResponse describing the outcome; deletion errors are logged
        and reported in the response rather than raised.
    """
    try:
        naive_mem_cube.text_mem.delete(delete_mem_req.memory_ids)
        naive_mem_cube.pref_mem.delete(delete_mem_req.memory_ids)
    except Exception as e:
        logger.error(f"Failed to delete memories: {e}", exc_info=True)
        return DeleteMemoryResponse(
            message="Failed to delete memories",
            # Same {"status": ...} shape as the success branch so clients can
            # parse both outcomes uniformly (was the bare string "failure").
            data={"status": "failure"},
        )
    return DeleteMemoryResponse(
        message="Memories deleted successfully",
        data={"status": "success"},
    )

src/memos/api/handlers/scheduler_handler.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222

2323

2424
def handle_scheduler_status(
25-
user_name: str | None = None,
25+
mem_cube_id: str | None = None,
2626
mem_scheduler: Any | None = None,
2727
instance_id: str = "",
2828
) -> dict[str, Any]:
@@ -43,17 +43,17 @@ def handle_scheduler_status(
4343
HTTPException: If status retrieval fails
4444
"""
4545
try:
46-
if user_name:
46+
if mem_cube_id:
4747
running = mem_scheduler.dispatcher.get_running_tasks(
48-
lambda task: getattr(task, "mem_cube_id", None) == user_name
48+
lambda task: getattr(task, "mem_cube_id", None) == mem_cube_id
4949
)
5050
tasks_iter = to_iter(running)
5151
running_count = len(tasks_iter)
5252
return {
5353
"message": "ok",
5454
"data": {
5555
"scope": "user",
56-
"user_name": user_name,
56+
"mem_cube_id": mem_cube_id,
5757
"running_tasks": running_count,
5858
"timestamp": time.time(),
5959
"instance_id": instance_id,

0 commit comments

Comments
 (0)