
Commit eb91fb4

Clean up vector store examples
1 parent f858a07 commit eb91fb4

2 files changed: +11 −133 lines changed


docs/contextual-grounding.md

Lines changed: 4 additions & 23 deletions
@@ -30,6 +30,7 @@ With grounding: "John was really helpful with the project"
 Replaces pronouns with their actual referents from conversation context.
 
 **Examples:**
+
 - "He likes coffee" → "John likes coffee"
 - "She recommended the book" → "Sarah recommended the book"
 - "They are meeting tomorrow" → "Alice and Bob are meeting tomorrow"
@@ -40,6 +41,7 @@ Replaces pronouns with their actual referents from conversation context.
 Converts relative time references to specific dates and times.
 
 **Examples:**
+
 - "Yesterday" → "January 15, 2024"
 - "Last week" → "The week of January 8-14, 2024"
 - "Tomorrow" → "January 17, 2024"
@@ -50,6 +52,7 @@ Converts relative time references to specific dates and times.
 Resolves location references to specific places mentioned in context.
 
 **Examples:**
+
 - "That place" → "Starbucks on Main Street"
 - "There" → "The office conference room"
 - "Here" → "The user's home office"
@@ -59,6 +62,7 @@ Resolves location references to specific places mentioned in context.
 Links vague references to specific entities from the conversation.
 
 **Examples:**
+
 - "The project" → "The website redesign project"
 - "The meeting" → "The quarterly review meeting"
 - "The document" → "The project proposal document"
@@ -229,29 +233,6 @@ grounding_quality = {
 }
 ```
 
-## Best Practices
-
-### Conversation Design
-
-1. **Provide context early**: Introduce entities, people, and places clearly
-2. **Use specific names**: Avoid excessive pronoun use when clarity matters
-3. **Maintain conversation threads**: Keep related discussions in the same session
-4. **Include temporal markers**: Use specific dates when discussing events
-
-### Memory Quality
-
-1. **Review extracted memories**: Check that grounding resolved references correctly
-2. **Provide feedback**: Use memory editing to correct grounding errors
-3. **Monitor patterns**: Identify common grounding failures for improvement
-4. **Test edge cases**: Verify grounding works with complex conversations
-
-### Performance Optimization
-
-1. **Limit conversation history**: Very long conversations may impact grounding quality
-2. **Use appropriate models**: Balance accuracy vs. speed based on your needs
-3. **Monitor token usage**: Grounding requires additional context tokens
-4. **Cache frequently referenced entities**: Consistent entity names improve grounding
-
 ## Troubleshooting
 
 ### Common Issues

docs/vector-store-advanced.md

Lines changed: 7 additions & 110 deletions
@@ -4,110 +4,7 @@ This guide covers advanced configuration patterns, performance optimization, cus
 
 ## Advanced Factory Patterns
 
-### Multi-Environment Factory
 
-Create factories that adapt to different environments:
-
-```python
-# my_vectorstores.py
-import os
-from langchain_core.embeddings import Embeddings
-from langchain_redis import Redis as LangchainRedis
-from langchain_chroma import Chroma
-from langchain_pinecone import PineconeVectorStore
-
-def create_adaptive_vectorstore(embeddings: Embeddings) -> VectorStore:
-    """Dynamically choose vectorstore based on environment."""
-
-    environment = os.getenv("ENVIRONMENT", "development")
-
-    if environment == "production":
-        # Use Pinecone for production
-        return PineconeVectorStore(
-            index_name=os.getenv("PINECONE_INDEX_NAME"),
-            embedding=embeddings,
-            api_key=os.getenv("PINECONE_API_KEY"),
-            environment=os.getenv("PINECONE_ENVIRONMENT")
-        )
-    elif environment == "staging":
-        # Use Redis for staging
-        return LangchainRedis(
-            redis_url=os.getenv("REDIS_URL"),
-            index_name="staging_memories",
-            embeddings=embeddings
-        )
-    else:
-        # Use Chroma for development
-        return Chroma(
-            persist_directory="./dev_chroma_data",
-            collection_name="dev_memories",
-            embedding_function=embeddings
-        )
-```
-
-### High-Availability Factory
-
-Create factories with resilience and failover capabilities:
-
-```python
-# resilient_factory.py
-import os
-from langchain_core.embeddings import Embeddings
-from langchain_core.vectorstores import VectorStore
-
-def create_resilient_vectorstore(embeddings: Embeddings) -> VectorStore:
-    """Create vectorstore with built-in resilience patterns."""
-
-    # Try multiple backends in order of preference
-    backend_preferences = [
-        ("redis", _create_redis_backend),
-        ("chroma", _create_chroma_backend),
-        ("memory", _create_memory_backend)  # Fallback to in-memory
-    ]
-
-    last_error = None
-    for backend_name, factory_func in backend_preferences:
-        try:
-            vectorstore = factory_func(embeddings)
-            print(f"Successfully initialized {backend_name} vectorstore")
-            return vectorstore
-        except Exception as e:
-            print(f"Failed to initialize {backend_name}: {e}")
-            last_error = e
-            continue
-
-    raise Exception(f"All vectorstore backends failed. Last error: {last_error}")
-
-def _create_redis_backend(embeddings: Embeddings) -> VectorStore:
-    """Try Redis with connection validation."""
-    from langchain_redis import Redis as LangchainRedis
-
-    vectorstore = LangchainRedis(
-        redis_url=os.getenv("REDIS_URL", "redis://localhost:6379"),
-        index_name="resilient_memories",
-        embeddings=embeddings
-    )
-
-    # Validate connection
-    vectorstore.client.ping()
-    return vectorstore
-
-def _create_chroma_backend(embeddings: Embeddings) -> VectorStore:
-    """Fallback to Chroma."""
-    from langchain_chroma import Chroma
-
-    return Chroma(
-        persist_directory=os.getenv("BACKUP_PERSIST_DIR", "./backup_chroma"),
-        collection_name="backup_memories",
-        embedding_function=embeddings
-    )
-
-def _create_memory_backend(embeddings: Embeddings) -> VectorStore:
-    """Final fallback to in-memory store."""
-    from langchain_core.vectorstores import InMemoryVectorStore
-
-    return InMemoryVectorStore(embeddings)
-```
 
 ### Multi-Backend Hybrid Factory
 
@@ -616,17 +513,17 @@ class VectorStoreMigrator:
         return await self.import_memories(memories, batch_size)
 
 # Usage example
-async def migrate_redis_to_pinecone():
-    """Example: Migrate from Redis to Pinecone."""
+async def migrate_pinecone_to_redis():
+    """Example: Migrate from Pinecone to Redis."""
 
-    # Source (Redis)
+    # Source (Pinecone)
     source_client = MemoryAPIClient(
-        base_url="http://localhost:8000",  # Current Redis setup
+        base_url="http://localhost:8000",  # Current Pinecone setup
     )
 
-    # Target (Pinecone) - Temporarily switch backend
+    # Target (Redis) - New Redis-based setup
     target_client = MemoryAPIClient(
-        base_url="http://localhost:8001",  # New Pinecone setup
+        base_url="http://localhost:8001",  # New Redis setup
     )
 
     migrator = VectorStoreMigrator(source_client, target_client)
@@ -637,7 +534,7 @@ async def migrate_redis_to_pinecone():
 
     # Option 2: File-based migration (safer for large datasets)
     await migrator.export_to_file("memory_export.json")
-    # ... Stop old server, start new server with Pinecone backend ...
+    # ... Stop old server, start new server with Redis backend ...
     imported = await migrator.import_from_file("memory_export.json")
     print(f"Imported {imported} memories from file")
 ```
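
> Editor's note (not part of the diffed docs): a sketch of how the renamed example might be invoked, assuming the async function above lives in an importable module (the module name here is a placeholder) and that both servers are reachable at the URLs shown in the example.

```python
import asyncio

# Hypothetical module path; substitute wherever migrate_pinecone_to_redis is defined.
from my_migration_script import migrate_pinecone_to_redis

if __name__ == "__main__":
    # Source (port 8000) and target (port 8001) servers must be running first.
    asyncio.run(migrate_pinecone_to_redis())
```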
