Commit 33fc280

update: requirement,Dockerfile (#784)
Add a full requirements-full.txt, streamline the requirements.txt dependencies, and update the .env.example configuration.
2 parents 85268ba + 9e6f846 commit 33fc280

File tree

4 files changed (+384, −194 lines)


docker/.env.example

Lines changed: 102 additions & 60 deletions
@@ -3,32 +3,31 @@
 
 ## Base
 TZ=Asia/Shanghai
-ENV_NAME=PLAYGROUND_OFFLINE # Tag shown in DingTalk notifications (e.g., PROD_ONLINE/TEST); no runtime effect unless ENABLE_DINGDING_BOT=true
 MOS_CUBE_PATH=/tmp/data_test # local data path
 MEMOS_BASE_PATH=. # CLI/SDK cache path
 MOS_ENABLE_DEFAULT_CUBE_CONFIG=true # enable default cube config
 MOS_ENABLE_REORGANIZE=false # enable memory reorg
+# MOS text memory type
 MOS_TEXT_MEM_TYPE=general_text # general_text | tree_text
 ASYNC_MODE=sync # async/sync, used in default cube config
 
 ## User/session defaults
-MOS_USER_ID=root
-MOS_SESSION_ID=default_session
-MOS_MAX_TURNS_WINDOW=20
+# Top-K for the LLM in the Product API (old version)
 MOS_TOP_K=50
 
 ## Chat LLM (main dialogue)
+# LLM model name for the Product API
 MOS_CHAT_MODEL=gpt-4o-mini
+# Temperature for the LLM in the Product API
 MOS_CHAT_TEMPERATURE=0.8
-MOS_MAX_TOKENS=8000
+# Max tokens for the LLM in the Product API
+MOS_MAX_TOKENS=2048
+# Top-P for the LLM in the Product API
 MOS_TOP_P=0.9
+# LLM provider for the Product API backend
 MOS_CHAT_MODEL_PROVIDER=openai # openai | huggingface | vllm
-MOS_MODEL_SCHEMA=memos.configs.llm.VLLMLLMConfig # vllm only: config class path; keep default unless you extend it
 OPENAI_API_KEY=sk-xxx # [required] when provider=openai
 OPENAI_API_BASE=https://api.openai.com/v1 # [required] base for the key
-OPENAI_BASE_URL= # compatibility for eval/scheduler
-VLLM_API_KEY= # required when provider=vllm
-VLLM_API_BASE=http://localhost:8088/v1 # required when provider=vllm
 
 ## MemReader / retrieval LLM
 MEMRADER_MODEL=gpt-4o-mini
@@ -37,31 +36,60 @@ MEMRADER_API_BASE=http://localhost:3000/v1 # [required] base for the key
 MEMRADER_MAX_TOKENS=5000
 
 ## Embedding & rerank
+# embedding dimension
 EMBEDDING_DIMENSION=1024
+# default embedding backend
 MOS_EMBEDDER_BACKEND=universal_api # universal_api | ollama
+# OpenAI-style provider
 MOS_EMBEDDER_PROVIDER=openai # required when universal_api
+# embedding model
 MOS_EMBEDDER_MODEL=bge-m3 # siliconflow → use BAAI/bge-m3
+# embedding URL
 MOS_EMBEDDER_API_BASE=http://localhost:8000/v1 # required when universal_api
+# embedding model key
 MOS_EMBEDDER_API_KEY=EMPTY # required when universal_api
 OLLAMA_API_BASE=http://localhost:11434 # required when backend=ollama
+# reranker config
 MOS_RERANKER_BACKEND=http_bge # http_bge | http_bge_strategy | cosine_local
+# reranker URL
 MOS_RERANKER_URL=http://localhost:8001 # required when backend=http_bge*
+# reranker model
 MOS_RERANKER_MODEL=bge-reranker-v2-m3 # siliconflow → use BAAI/bge-reranker-v2-m3
 MOS_RERANKER_HEADERS_EXTRA= # extra headers, JSON string, e.g. {"Authorization":"Bearer your_token"}
+# reranker strategy
 MOS_RERANKER_STRATEGY=single_turn
-MOS_RERANK_SOURCE= # optional rerank scope, e.g., history/stream/custom
+
+
+# External services (for evaluation scripts)
+# API key for reproducing the Zep (competitor product) evaluation
+ZEP_API_KEY=your_zep_api_key_here
+# API key for reproducing the Mem0 (competitor product) evaluation
+MEM0_API_KEY=your_mem0_api_key_here
+# API key for reproducing the MemU (competitor product) evaluation
+MEMU_API_KEY=your_memu_api_key_here
+# API key for reproducing the MEMOBASE (competitor product) evaluation
+MEMOBASE_API_KEY=your_memobase_api_key_here
+# Project URL for reproducing the MEMOBASE (competitor product) evaluation
+MEMOBASE_PROJECT_URL=your_memobase_project_url_here
+# LLM for evaluation
+MODEL=gpt-4o-mini
+# embedding model for evaluation
+EMBEDDING_MODEL=nomic-embed-text:latest
 
 ## Internet search & preference memory
+# Enable web search
 ENABLE_INTERNET=false
+# API key for BOCHA Search
 BOCHA_API_KEY= # required if ENABLE_INTERNET=true
+# default search mode
 SEARCH_MODE=fast # fast | fine | mixture
-FAST_GRAPH=false
-BM25_CALL=false
-VEC_COT_CALL=false
+# Fine (slow) retrieval strategy configuration; rewrite = query-rewrite strategy
 FINE_STRATEGY=rewrite # rewrite | recreate | deep_search
-ENABLE_ACTIVATION_MEMORY=false
+# Whether to enable preference memory
 ENABLE_PREFERENCE_MEMORY=true
+# Preference memory add mode
 PREFERENCE_ADDER_MODE=fast # fast | safe
+# Whether to deduplicate explicit preferences against factual (textual) memory
 DEDUP_PREF_EXP_BY_TEXTUAL=false
 
 ## Reader chunking
@@ -72,101 +100,115 @@ MEM_READER_CHAT_CHUNK_SESS_SIZE=10 # sessions per chunk (default mode)
 MEM_READER_CHAT_CHUNK_OVERLAP=2 # overlap between chunks
 
 ## Scheduler (MemScheduler / API)
+# Main switch for configuring the memory scheduler during MemOS class initialization
 MOS_ENABLE_SCHEDULER=false
+# Number of most relevant memory entries the scheduler retrieves or processes at runtime (e.g., when reordering or updating working memory)
 MOS_SCHEDULER_TOP_K=10
+# Interval (in seconds) for updating activation memory (typically caching or short-term memory mechanisms)
 MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL=300
+# Context window size the scheduler considers when processing tasks (e.g., number of recent messages or conversation turns)
 MOS_SCHEDULER_CONTEXT_WINDOW_SIZE=5
+# Maximum number of worker threads in the scheduler thread pool for concurrent task execution
 MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS=10000
+# Polling interval (in seconds) at which the scheduler consumes new messages/tasks from the queue; smaller values respond faster but may use more CPU
 MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS=0.01
+# Whether to enable the scheduler's parallel dispatch to improve the throughput of concurrent operations
 MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH=true
+# Switch to enable or disable the activation-memory function in the scheduler logic
 MOS_SCHEDULER_ENABLE_ACTIVATION_MEMORY=false
+# Whether the scheduler instance is actually started during server initialization; if false, the scheduler object may be created but its background loop is not started
 API_SCHEDULER_ON=true
+# Window size for API search operations in OptimizedScheduler; passed to the scheduler API module to control the scope of the search context
 API_SEARCH_WINDOW_SIZE=5
+# How many previous conversation turns (history) to retrieve and consider during hybrid search (fast search + asynchronous fine search); helps provide context-aware results
 API_SEARCH_HISTORY_TURNS=5
 
 ## Graph / vector stores
+# Neo4j / graph backend selection
 NEO4J_BACKEND=neo4j-community # neo4j-community | neo4j | nebular | polardb
+# Neo4j database URL
 NEO4J_URI=bolt://localhost:7687 # required when backend=neo4j*
+# Neo4j database user
 NEO4J_USER=neo4j # required when backend=neo4j*
+# Neo4j database password
 NEO4J_PASSWORD=12345678 # required when backend=neo4j*
+# Neo4j database name
 NEO4J_DB_NAME=neo4j # required for shared-db mode
+# Whether the Neo4j database is shared with MemOS (shared-db mode)
 MOS_NEO4J_SHARED_DB=false
 QDRANT_HOST=localhost
 QDRANT_PORT=6333
 # For Qdrant Cloud / remote endpoint (takes priority if set):
 QDRANT_URL=your_qdrant_url
 QDRANT_API_KEY=your_qdrant_key
+# Milvus server URI
 MILVUS_URI=http://localhost:19530 # required when ENABLE_PREFERENCE_MEMORY=true
 MILVUS_USER_NAME=root # same as above
 MILVUS_PASSWORD=12345678 # same as above
-NEBULAR_HOSTS=["localhost"]
-NEBULAR_USER=root
-NEBULAR_PASSWORD=xxxxxx
-NEBULAR_SPACE=shared-tree-textual-memory
-NEBULAR_WORKING_MEMORY=20
-NEBULAR_LONGTERM_MEMORY=1000000
-NEBULAR_USER_MEMORY=1000000
-
-## Relational DB (user manager / PolarDB)
-MOS_USER_MANAGER_BACKEND=sqlite # sqlite | mysql
-MYSQL_HOST=localhost # required when backend=mysql
-MYSQL_PORT=3306
-MYSQL_USERNAME=root
-MYSQL_PASSWORD=12345678
-MYSQL_DATABASE=memos_users
-MYSQL_CHARSET=utf8mb4
+
+# PolarDB endpoint/host
 POLAR_DB_HOST=localhost
+# PolarDB port
 POLAR_DB_PORT=5432
+# PolarDB username
 POLAR_DB_USER=root
+# PolarDB password
 POLAR_DB_PASSWORD=123456
+# PolarDB database name
 POLAR_DB_DB_NAME=shared_memos_db
+# PolarDB server mode:
+# If set to true, use multi-database mode: each user gets an independent database (physical isolation).
+# If set to false (default), use shared-database mode: all users share one database with logical isolation via username.
 POLAR_DB_USE_MULTI_DB=false
+# PolarDB connection pool size
+POLARDB_POOL_MAX_CONN=100
 
-## Redis (scheduler queue) — fill only if you want scheduler queues in Redis; otherwise in-memory queue is used
-REDIS_HOST=localhost # global Redis endpoint (preferred over MEMSCHEDULER_*)
-REDIS_PORT=6379
-REDIS_DB=0
-REDIS_PASSWORD=
-REDIS_SOCKET_TIMEOUT=
-REDIS_SOCKET_CONNECT_TIMEOUT=
+## Redis configuration
+# The Redis message queue sends scheduling messages and synchronization info for certain variables
 MEMSCHEDULER_REDIS_HOST= # fallback keys if not using the global ones
 MEMSCHEDULER_REDIS_PORT=
 MEMSCHEDULER_REDIS_DB=
 MEMSCHEDULER_REDIS_PASSWORD=
 MEMSCHEDULER_REDIS_TIMEOUT=
 MEMSCHEDULER_REDIS_CONNECT_TIMEOUT=
 
-## MemScheduler LLM
-MEMSCHEDULER_OPENAI_API_KEY= # LLM key for scheduler’s own calls (OpenAI-compatible); leave empty if scheduler not using LLM
-MEMSCHEDULER_OPENAI_BASE_URL= # Base URL for the above; can reuse OPENAI_API_BASE
-MEMSCHEDULER_OPENAI_DEFAULT_MODEL=gpt-4o-mini
 
 ## Nacos (optional config center)
+# Enable Nacos long-polling watch (defaults to true)
 NACOS_ENABLE_WATCH=false
+# Long-polling watch interval in seconds (60 here); leave unset to use the 30-second default
 NACOS_WATCH_INTERVAL=60
+# Nacos server address
 NACOS_SERVER_ADDR=
+# Nacos data ID
 NACOS_DATA_ID=
+# Nacos group
 NACOS_GROUP=DEFAULT_GROUP
+# Nacos namespace
 NACOS_NAMESPACE=
+# Nacos access key (AK)
 AK=
+# Nacos secret key (SK)
 SK=
 
-## DingTalk bot & OSS upload
-ENABLE_DINGDING_BOT=false # set true -> fields below required
-DINGDING_ACCESS_TOKEN_USER=
-DINGDING_SECRET_USER=
-DINGDING_ACCESS_TOKEN_ERROR=
-DINGDING_SECRET_ERROR=
-DINGDING_ROBOT_CODE=
-DINGDING_APP_KEY=
-DINGDING_APP_SECRET=
-OSS_ENDPOINT= # bot image upload depends on OSS
-OSS_REGION=
-OSS_BUCKET_NAME=
-OSS_ACCESS_KEY_ID=
-OSS_ACCESS_KEY_SECRET=
-OSS_PUBLIC_BASE_URL=
-
-## SDK / external client
-MEMOS_API_KEY=
-MEMOS_BASE_URL=https://memos.memtensor.cn/api/openmem/v1
+# Chat model list for the chat API
+CHAT_MODEL_LIST='[{
+"backend": "deepseek",
+"api_base": "http://localhost:1234",
+"api_key": "your-api-key",
+"model_name_or_path": "deepseek-r1",
+"support_models": ["deepseek-r1"]
+}]'
+
+# RabbitMQ host name for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_HOST_NAME=
+# RabbitMQ user name for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_USER_NAME=
+# RabbitMQ password for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_PASSWORD=
+# RabbitMQ virtual host for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_VIRTUAL_HOST=memos
+# Erase connection state on connect for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_ERASE_ON_CONNECT=true
+# RabbitMQ port for the message-log pipeline
+MEMSCHEDULER_RABBITMQ_PORT=5672
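
The new CHAT_MODEL_LIST variable packs a JSON array of chat-model endpoints into a single environment value. As a rough illustration of how an application might consume it, the minimal Python sketch below parses that value; load_chat_models is a hypothetical helper written for this note, not part of the MemOS codebase, and it assumes only that the variable holds JSON shaped like the example above.

import json
import os


def load_chat_models(env=None):
    """Hypothetical helper: parse CHAT_MODEL_LIST into a list of model configs."""
    env = dict(os.environ) if env is None else env
    raw = env.get("CHAT_MODEL_LIST", "").strip()
    if not raw:
        return []  # variable unset or empty
    try:
        models = json.loads(raw)
    except json.JSONDecodeError as exc:
        raise ValueError(f"CHAT_MODEL_LIST is not valid JSON: {exc}") from exc
    # Each entry is expected to carry backend, api_base, api_key,
    # model_name_or_path and support_models, as in the example above.
    return models if isinstance(models, list) else [models]


if __name__ == "__main__":
    for entry in load_chat_models():
        print(entry.get("backend"), entry.get("model_name_or_path"))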

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -32,4 +32,4 @@ ENV PYTHONPATH=/app/src
 EXPOSE 8000
 
 # Start the docker
-CMD ["uvicorn", "memos.api.product_api:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
+CMD ["uvicorn", "memos.api.server_api:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
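
The CMD now boots uvicorn against memos.api.server_api:app instead of memos.api.product_api:app, still on port 8000. One way to sanity-check the change after starting the container (for example with docker run -p 8000:8000 --env-file docker/.env <image>) is the hypothetical smoke test below; it assumes the app is a FastAPI instance with the default /openapi.json route enabled, which may not hold if docs are disabled.

import json
import urllib.request

BASE_URL = "http://localhost:8000"  # matches EXPOSE 8000 / --port 8000 in the Dockerfile

# Fetch the OpenAPI schema to confirm the server defined by the new CMD is answering.
with urllib.request.urlopen(f"{BASE_URL}/openapi.json", timeout=5) as resp:
    schema = json.load(resp)

print("server is up:", schema.get("info", {}).get("title", "unknown app"))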
