Skip to content

Commit eb46380

Browse files
authored
Merge pull request #99 from redis/fix/mcp-search-missing-background-tasks
Fix MCP search_long_term_memory missing background_tasks parameter
2 parents c1c166a + c4480fb commit eb46380

File tree

12 files changed

+304
-44
lines changed

12 files changed

+304
-44
lines changed

README.md

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,35 @@ uv run agent-memory mcp
159159
uv run agent-memory mcp --mode sse --port 9000 --no-worker
160160
```
161161

162+
### MCP config via uvx (recommended)
163+
164+
Use this in your MCP tool configuration (e.g., Claude Desktop mcp.json):
165+
166+
```json
167+
{
168+
"mcpServers": {
169+
"memory": {
170+
"command": "uvx",
171+
"args": ["--from", "agent-memory-server", "agent-memory", "mcp"],
172+
"env": {
173+
"DISABLE_AUTH": "true",
174+
"REDIS_URL": "redis://localhost:6379",
175+
"OPENAI_API_KEY": "<your-openai-key>"
176+
}
177+
}
178+
}
179+
}
180+
```
181+
182+
Notes:
183+
- API keys: Set either `OPENAI_API_KEY` (default models use OpenAI) or switch to Anthropic by setting `ANTHROPIC_API_KEY` and `GENERATION_MODEL` to an Anthropic model (e.g., `claude-3-5-haiku-20241022`).
184+
185+
- Make sure your MCP host can find `uvx` (on its PATH or by using an absolute command path).
186+
- macOS: `brew install uv`
187+
- If not on PATH, set `"command"` to the absolute path (e.g., `/opt/homebrew/bin/uvx` on Apple Silicon, `/usr/local/bin/uvx` on Intel macOS). On Linux, `~/.local/bin/uvx` is common. See https://docs.astral.sh/uv/getting-started/
188+
- For production, remove `DISABLE_AUTH` and configure proper authentication.
189+
190+
162191
## Documentation
163192

164193
📚 **[Full Documentation](https://redis.github.io/agent-memory-server/)** - Complete guides, API reference, and examples

agent_memory_server/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
"""Redis Agent Memory Server - A memory system for conversational AI."""
22

3-
__version__ = "0.12.4"
3+
__version__ = "0.12.5"

agent_memory_server/config.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,7 @@ class Settings(BaseSettings):
235235
# Cloud
236236
## Cloud region
237237
region_name: str | None = None
238+
238239
## AWS Cloud credentials
239240
aws_access_key_id: str | None = None
240241
aws_secret_access_key: str | None = None

agent_memory_server/extraction.py

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@
55
import ulid
66
from tenacity.asyncio import AsyncRetrying
77
from tenacity.stop import stop_after_attempt
8-
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline
98

9+
# Lazy-import transformers in get_ner_model to avoid heavy deps at startup
1010
from agent_memory_server.config import settings
1111
from agent_memory_server.filters import DiscreteMemoryExtracted, MemoryType
1212
from agent_memory_server.llms import (
@@ -61,9 +61,27 @@ def get_ner_model() -> Any:
6161
"""
6262
global _ner_model, _ner_tokenizer
6363
if _ner_model is None:
64+
# Lazy import to avoid importing heavy ML frameworks at process startup
65+
try:
66+
from transformers import (
67+
AutoModelForTokenClassification,
68+
AutoTokenizer,
69+
pipeline as hf_pipeline,
70+
)
71+
except Exception as e:
72+
logger.warning(
73+
"Transformers not available or failed to import; NER disabled: %s", e
74+
)
75+
raise
76+
6477
_ner_tokenizer = AutoTokenizer.from_pretrained(settings.ner_model)
6578
_ner_model = AutoModelForTokenClassification.from_pretrained(settings.ner_model)
66-
return pipeline("ner", model=_ner_model, tokenizer=_ner_tokenizer)
79+
return hf_pipeline("ner", model=_ner_model, tokenizer=_ner_tokenizer)
80+
81+
# If already initialized, import the lightweight symbol and return a new pipeline
82+
from transformers import pipeline as hf_pipeline # type: ignore
83+
84+
return hf_pipeline("ner", model=_ner_model, tokenizer=_ner_tokenizer)
6785

6886

6987
def extract_entities(text: str) -> list[str]:

agent_memory_server/long_term_memory.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -893,6 +893,25 @@ async def search_long_term_memories(
893893
Returns:
894894
MemoryRecordResults containing matching memories
895895
"""
896+
# If no query text is provided, perform a filter-only listing (no semantic search).
897+
# This enables patterns like: "return all memories for this user/namespace".
898+
if not (text or "").strip():
899+
adapter = await get_vectorstore_adapter()
900+
return await adapter.list_memories(
901+
session_id=session_id,
902+
user_id=user_id,
903+
namespace=namespace,
904+
created_at=created_at,
905+
last_accessed=last_accessed,
906+
topics=topics,
907+
entities=entities,
908+
memory_type=memory_type,
909+
event_date=event_date,
910+
memory_hash=memory_hash,
911+
limit=limit,
912+
offset=offset,
913+
)
914+
896915
# Optimize query for vector search if requested.
897916
search_query = text
898917
optimized_applied = False

agent_memory_server/mcp.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -521,8 +521,12 @@ async def search_long_term_memory(
521521
limit=limit,
522522
offset=offset,
523523
)
524+
# Create a background tasks instance for the MCP call
525+
from agent_memory_server.dependencies import HybridBackgroundTasks
526+
527+
background_tasks = HybridBackgroundTasks()
524528
results = await core_search_long_term_memory(
525-
payload, optimize_query=optimize_query
529+
payload, background_tasks=background_tasks, optimize_query=optimize_query
526530
)
527531
return MemoryRecordResults(
528532
total=results.total,

agent_memory_server/vectorstore_adapter.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -403,6 +403,12 @@ def parse_datetime(dt_val: str | float | None) -> datetime | None:
403403
# Unix timestamp from Redis
404404
return datetime.fromtimestamp(dt_val, tz=UTC)
405405
if isinstance(dt_val, str):
406+
# Try to parse as float first (Unix timestamp as string)
407+
try:
408+
timestamp = float(dt_val)
409+
return datetime.fromtimestamp(timestamp, tz=UTC)
410+
except ValueError:
411+
pass
406412
# ISO string from other backends
407413
return datetime.fromisoformat(dt_val)
408414
return None

docker-compose.yml

Lines changed: 18 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,12 @@ services:
1818
image: redislabs/agent-memory-server:${REDIS_AGENT_MEMORY_VERSION:-latest}
1919
ports:
2020
- "8000:8000"
21+
env_file:
22+
- path: .env
23+
required: false
2124
environment:
2225
- REDIS_URL=redis://redis:6379
2326
- PORT=8000
24-
- OPENAI_API_KEY=${OPENAI_API_KEY}
25-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
2627
- GENERATION_MODEL=gpt-4o-mini
2728
- EMBEDDING_MODEL=text-embedding-3-small
2829
- LONG_TERM_MEMORY=True
@@ -44,11 +45,12 @@ services:
4445
mcp:
4546
profiles: ["standard", ""]
4647
image: redislabs/agent-memory-server:${REDIS_AGENT_MEMORY_VERSION:-latest}
48+
env_file:
49+
- path: .env
50+
required: false
4751
environment:
4852
- REDIS_URL=redis://redis:6379
4953
- PORT=9050
50-
- OPENAI_API_KEY=${OPENAI_API_KEY}
51-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
5254
- GENERATION_MODEL=gpt-4o-mini
5355
- EMBEDDING_MODEL=text-embedding-3-small
5456
- LONG_TERM_MEMORY=True
@@ -66,10 +68,11 @@ services:
6668
task-worker:
6769
profiles: ["standard", ""]
6870
image: redislabs/agent-memory-server:${REDIS_AGENT_MEMORY_VERSION:-latest}
71+
env_file:
72+
- path: .env
73+
required: false
6974
environment:
7075
- REDIS_URL=redis://redis:6379
71-
- OPENAI_API_KEY=${OPENAI_API_KEY}
72-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
7376
- GENERATION_MODEL=gpt-4o-mini
7477
- EMBEDDING_MODEL=text-embedding-3-small
7578
- LONG_TERM_MEMORY=True
@@ -94,16 +97,12 @@ services:
9497
image: redislabs/agent-memory-server-aws:${REDIS_AGENT_MEMORY_AWS_VERSION:-latest}
9598
ports:
9699
- "8000:8000"
100+
env_file:
101+
- path: .env
102+
required: false
97103
environment:
98104
- REDIS_URL=redis://redis:6379
99105
- PORT=8000
100-
- OPENAI_API_KEY=${OPENAI_API_KEY}
101-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
102-
# AWS Bedrock configuration
103-
- REGION_NAME=${REGION_NAME}
104-
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
105-
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
106-
- AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
107106
- GENERATION_MODEL=anthropic.claude-haiku-4-5-20251001-v1:0
108107
- EMBEDDING_MODEL=amazon.titan-embed-text-v2:0
109108
- LONG_TERM_MEMORY=True
@@ -125,16 +124,12 @@ services:
125124
mcp-aws:
126125
profiles: ["aws"]
127126
image: redislabs/agent-memory-server-aws:${REDIS_AGENT_MEMORY_AWS_VERSION:-latest}
127+
env_file:
128+
- path: .env
129+
required: false
128130
environment:
129131
- REDIS_URL=redis://redis:6379
130132
- PORT=9050
131-
- OPENAI_API_KEY=${OPENAI_API_KEY}
132-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
133-
# AWS Bedrock configuration
134-
- REGION_NAME=${REGION_NAME}
135-
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
136-
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
137-
- AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
138133
- GENERATION_MODEL=anthropic.claude-haiku-4-5-20251001-v1:0
139134
- EMBEDDING_MODEL=amazon.titan-embed-text-v2:0
140135
- LONG_TERM_MEMORY=True
@@ -152,15 +147,11 @@ services:
152147
task-worker-aws:
153148
profiles: ["aws"]
154149
image: redislabs/agent-memory-server-aws:${REDIS_AGENT_MEMORY_AWS_VERSION:-latest}
150+
env_file:
151+
- path: .env
152+
required: false
155153
environment:
156154
- REDIS_URL=redis://redis:6379
157-
- OPENAI_API_KEY=${OPENAI_API_KEY}
158-
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
159-
# AWS Bedrock configuration
160-
- REGION_NAME=${REGION_NAME}
161-
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
162-
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
163-
- AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
164155
- GENERATION_MODEL=anthropic.claude-haiku-4-5-20251001-v1:0
165156
- EMBEDDING_MODEL=amazon.titan-embed-text-v2:0
166157
- LONG_TERM_MEMORY=True

docs/getting-started.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,32 @@ uv run agent-memory mcp --mode sse --no-worker
4848
uv run agent-memory mcp --mode sse
4949
```
5050

51+
### Using uvx in MCP clients
52+
53+
When configuring MCP-enabled apps (e.g., Claude Desktop), prefer `uvx` so the app can run the server without a local checkout:
54+
55+
```json
56+
{
57+
"mcpServers": {
58+
"memory": {
59+
"command": "uvx",
60+
"args": ["--from", "agent-memory-server", "agent-memory", "mcp"],
61+
"env": {
62+
"DISABLE_AUTH": "true",
63+
"REDIS_URL": "redis://localhost:6379",
64+
"OPENAI_API_KEY": "<your-openai-key>"
65+
}
66+
}
67+
}
68+
}
69+
```
70+
71+
Notes:
72+
- API keys: Default models use OpenAI. Set `OPENAI_API_KEY`. To use Anthropic instead, set `ANTHROPIC_API_KEY` and also `GENERATION_MODEL` to an Anthropic model (e.g., `claude-3-5-haiku-20241022`).
73+
- Make sure your MCP host can find `uvx` (on its PATH or by using an absolute command path). macOS: `brew install uv`. If not on PATH, set `"command"` to an absolute path (e.g., `/opt/homebrew/bin/uvx` on Apple Silicon, `/usr/local/bin/uvx` on Intel macOS).
74+
- For production, remove `DISABLE_AUTH` and configure proper authentication.
75+
76+
5177
**For production deployments**, you'll need to run a separate worker process:
5278

5379
```bash

docs/mcp.md

Lines changed: 46 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -67,29 +67,68 @@ You can use the MCP server that comes with this project in any application or SD
6767

6868
<img src="../claude.png">
6969

70-
For example, with Claude, use the following configuration:
70+
For Claude, the easiest way is to use uvx (recommended):
7171

7272
```json
7373
{
7474
"mcpServers": {
75-
"redis-memory-server": {
75+
"memory": {
76+
"command": "uvx",
77+
"args": ["--from", "agent-memory-server", "agent-memory", "mcp"],
78+
"env": {
79+
"DISABLE_AUTH": "true",
80+
"REDIS_URL": "redis://localhost:6379",
81+
"OPENAI_API_KEY": "<your-openai-key>"
82+
}
83+
}
84+
}
85+
}
86+
```
87+
88+
Notes:
89+
- API keys: Default models use OpenAI. Set `OPENAI_API_KEY`. To use Anthropic instead, set `ANTHROPIC_API_KEY` and also `GENERATION_MODEL` to an Anthropic model (e.g., `claude-3-5-haiku-20241022`).
90+
- Make sure your MCP host can find `uvx` (on its PATH or by using an absolute command path).
91+
- macOS: `brew install uv`
92+
- If not on PATH, set `"command"` to an absolute path (e.g., `/opt/homebrew/bin/uvx` on Apple Silicon, `/usr/local/bin/uvx` on Intel macOS). On Linux, `~/.local/bin/uvx` is common. See https://docs.astral.sh/uv/getting-started/ for distro specifics
93+
- For production, remove `DISABLE_AUTH` (or set it to `false`) and configure proper authentication per the Authentication guide.
94+
95+
If you’re running from a local checkout instead of PyPI, you can use `uv run` with a directory:
96+
97+
```json
98+
{
99+
"mcpServers": {
100+
"memory": {
76101
"command": "uv",
77102
"args": [
78103
"--directory",
79104
"/ABSOLUTE/PATH/TO/REPO/DIRECTORY/agent-memory-server",
80105
"run",
81106
"agent-memory",
82-
"mcp",
83-
"--mode",
84-
"stdio"
107+
"mcp"
85108
]
86109
}
87110
}
88111
}
89112
```
90113

91-
**NOTE:** On a Mac, this configuration requires that you use `brew install uv` to install uv. Probably any method that makes the `uv`
92-
command globally accessible, so Claude can find it, would work.
114+
Alternative (Anthropic):
115+
116+
```json
117+
{
118+
"mcpServers": {
119+
"memory": {
120+
"command": "uvx",
121+
"args": ["--from", "agent-memory-server", "agent-memory", "mcp"],
122+
"env": {
123+
"DISABLE_AUTH": "true",
124+
"REDIS_URL": "redis://localhost:6379",
125+
"ANTHROPIC_API_KEY": "<your-anthropic-key>",
126+
"GENERATION_MODEL": "claude-3-5-haiku-20241022"
127+
}
128+
}
129+
}
130+
}
131+
```
93132

94133
### Cursor
95134

0 commit comments

Comments
 (0)