Commit a8257ba

improve imports, tests, and rename things
1 parent 3b907a5 commit a8257ba

17 files changed: +131 −249 lines changed


README.md

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ We're excited to announce the support for **RedisVL Extensions**. These modules
 Increase application throughput and reduce the cost of using LLM models in production by leveraging previously generated knowledge with the [`SemanticCache`](https://docs.redisvl.com/en/stable/api/cache.html#semanticcache).

 ```python
-from redisvl.extensions.llmcache import SemanticCache
+from redisvl.extensions.cache.llm import SemanticCache

 # init cache with TTL and semantic distance threshold
 llmcache = SemanticCache(
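
This README change and the notebook diffs below are the same one-line rename: the import path moves from `redisvl.extensions.llmcache` to `redisvl.extensions.cache.llm`. A minimal migration sketch follows; the constructor arguments and the `store`/`check` calls are assumed from the public RedisVL `SemanticCache` API rather than taken from this diff.

```python
# Old import path, kept only as a comment:
# from redisvl.extensions.llmcache import SemanticCache

# New import path introduced by this commit:
from redisvl.extensions.cache.llm import SemanticCache

# init cache with TTL and semantic distance threshold (illustrative values)
llmcache = SemanticCache(
    name="llmcache",
    redis_url="redis://localhost:6379",
    distance_threshold=0.1,
    ttl=300,
)

# store a prompt/response pair, then look it up with a semantically similar prompt
llmcache.store(prompt="What is the capital of France?", response="Paris")
hits = llmcache.check(prompt="What's the capital city of France?")
if hits:
    print(hits[0]["response"])
```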

docs/user_guide/03_llmcache.ipynb

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@
 }
 ],
 "source": [
-"from redisvl.extensions.llmcache import SemanticCache\n",
+"from redisvl.extensions.cache.llm import SemanticCache\n",
 "\n",
 "llmcache = SemanticCache(\n",
 "    name=\"llmcache\",  # underlying search index name\n",

docs/user_guide/04_vectorizers.ipynb

Lines changed: 1 addition & 1 deletion
@@ -609,7 +609,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from redisvl.extensions.llmcache import SemanticCache\n",
+"from redisvl.extensions.cache.llm import SemanticCache\n",
 "\n",
 "cache = SemanticCache(name=\"custom_cache\", vectorizer=custom_vectorizer)\n",
 "\n",

docs/user_guide/09_threshold_optimization.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from redisvl.extensions.llmcache import SemanticCache\n",
+"from redisvl.extensions.cache.llm import SemanticCache\n",
 "\n",
 "sem_cache = SemanticCache(\n",
 "    name=\"sem_cache\",  # underlying search index name\n",

docs/user_guide/release_guide/0_5_0_release.ipynb

Lines changed: 1 addition & 1 deletion
@@ -248,7 +248,7 @@
 ],
 "source": [
 "from redisvl.utils.optimize import CacheThresholdOptimizer\n",
-"from redisvl.extensions.llmcache import SemanticCache\n",
+"from redisvl.extensions.cache.llm import SemanticCache\n",
 "\n",
 "sem_cache = SemanticCache(\n",
 "    name=\"sem_cache\",  # underlying search index name\n",

redisvl/extensions/__init__.py

Lines changed: 0 additions & 5 deletions
@@ -1,6 +1 @@
-"""
-Redis Vector Library Extensions

-These extensions provide additional functionality on top of the
-core RedisVL functionality.
-"""

redisvl/extensions/cache/base.py

Lines changed: 35 additions & 21 deletions
@@ -17,6 +17,9 @@ class BaseCache:
     including TTL management, connection handling, and basic cache operations.
     """

+    _redis_client: Optional[Redis]
+    _async_redis_client: Optional[AsyncRedis]
+
     def __init__(
         self,
         name: str,
@@ -47,12 +50,15 @@ def __init__(
             "connection_kwargs": connection_kwargs,
         }

+        # Initialize Redis clients
+        self._async_redis_client = None
+
         if redis_client:
             self._owns_redis_client = False
             self._redis_client = redis_client
         else:
             self._owns_redis_client = True
-            self._redis_client = None
+            self._redis_client = None  # type: ignore

     def _get_prefix(self) -> str:
         """Get the key prefix for Redis keys.
@@ -102,9 +108,10 @@ def _get_redis_client(self) -> Redis:
             Redis: A Redis client instance.
         """
         if self._redis_client is None:
-            self._redis_client = Redis.from_url(
-                self.redis_kwargs["redis_url"], **self.redis_kwargs["connection_kwargs"]
-            )
+            # Create new Redis client
+            url = self.redis_kwargs["redis_url"]
+            kwargs = self.redis_kwargs["connection_kwargs"]
+            self._redis_client = Redis.from_url(url, **kwargs)  # type: ignore
         return self._redis_client

     async def _get_async_redis_client(self) -> AsyncRedis:
@@ -114,9 +121,10 @@ async def _get_async_redis_client(self) -> AsyncRedis:
             AsyncRedis: An async Redis client instance.
         """
         if not hasattr(self, "_async_redis_client") or self._async_redis_client is None:
-            self._async_redis_client = AsyncRedis.from_url(
-                self.redis_kwargs["redis_url"], **self.redis_kwargs["connection_kwargs"]
-            )
+            # Create new async Redis client
+            url = self.redis_kwargs["redis_url"]
+            kwargs = self.redis_kwargs["connection_kwargs"]
+            self._async_redis_client = AsyncRedis.from_url(url, **kwargs)  # type: ignore
         return self._async_redis_client

     def expire(self, key: str, ttl: Optional[int] = None) -> None:
@@ -161,25 +169,29 @@ def clear(self) -> None:
         prefix = self._get_prefix()

         # Scan for all keys with our prefix
-        cursor = "0"
-        while cursor != 0:
-            cursor, keys = client.scan(cursor=cursor, match=f"{prefix}*", count=100)
+        cursor = 0  # Start with cursor 0
+        while True:
+            cursor_int, keys = client.scan(cursor=cursor, match=f"{prefix}*", count=100)  # type: ignore
             if keys:
                 client.delete(*keys)
+            if cursor_int == 0:  # Redis returns 0 when scan is complete
+                break
+            cursor = cursor_int  # Update cursor for next iteration

     async def aclear(self) -> None:
         """Async clear the cache of all keys."""
         client = await self._get_async_redis_client()
         prefix = self._get_prefix()

         # Scan for all keys with our prefix
-        cursor = "0"
-        while cursor != 0:
-            cursor, keys = await client.scan(
-                cursor=cursor, match=f"{prefix}*", count=100
-            )
+        cursor = 0  # Start with cursor 0
+        while True:
+            cursor_int, keys = await client.scan(cursor=cursor, match=f"{prefix}*", count=100)  # type: ignore
             if keys:
                 await client.delete(*keys)
+            if cursor_int == 0:  # Redis returns 0 when scan is complete
+                break
+            cursor = cursor_int  # Update cursor for next iteration

     def disconnect(self) -> None:
         """Disconnect from Redis."""
@@ -188,11 +200,12 @@ def disconnect(self) -> None:

         if self._redis_client:
             self._redis_client.close()
-            self._redis_client = None
+            self._redis_client = None  # type: ignore

         if hasattr(self, "_async_redis_client") and self._async_redis_client:
-            self._async_redis_client.close()
-            self._async_redis_client = None
+            # Use synchronous close for async client in synchronous context
+            self._async_redis_client.close()  # type: ignore
+            self._async_redis_client = None  # type: ignore

     async def adisconnect(self) -> None:
         """Async disconnect from Redis."""
@@ -201,8 +214,9 @@ async def adisconnect(self) -> None:

         if self._redis_client:
             self._redis_client.close()
-            self._redis_client = None
+            self._redis_client = None  # type: ignore

         if hasattr(self, "_async_redis_client") and self._async_redis_client:
-            await self._async_redis_client.aclose()
-            self._async_redis_client = None
+            # Use proper async close method
+            await self._async_redis_client.aclose()  # type: ignore
+            self._async_redis_client = None  # type: ignore
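
The `clear`/`aclear` change above replaces the string-typed cursor (`cursor = "0"` compared against the integer `0`) with the standard redis-py SCAN loop: start at cursor `0`, delete each returned batch, and stop once the server reports cursor `0` again. A standalone sketch of that pattern, assuming a locally reachable Redis and a hypothetical `embedcache:` prefix:

```python
from redis import Redis


def delete_by_prefix(client: Redis, prefix: str) -> int:
    """Delete every key matching `prefix*` via SCAN and return how many were removed."""
    deleted = 0
    cursor = 0  # SCAN iterations always start at cursor 0
    while True:
        cursor, keys = client.scan(cursor=cursor, match=f"{prefix}*", count=100)
        if keys:
            deleted += client.delete(*keys)
        if cursor == 0:  # the server signals a complete pass by returning cursor 0
            break
    return deleted


if __name__ == "__main__":
    client = Redis.from_url("redis://localhost:6379")  # placeholder connection URL
    print(delete_by_prefix(client, "embedcache:"))
```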

redisvl/extensions/cache/embeddings/__init__.py

Lines changed: 1 addition & 2 deletions
@@ -4,8 +4,7 @@
 This module provides embedding caching functionality for RedisVL.
 """

-from redisvl.extensions.cache.embeddings.base import BaseEmbeddingsCache
 from redisvl.extensions.cache.embeddings.embeddings import EmbeddingsCache
 from redisvl.extensions.cache.embeddings.schema import CacheEntry

-__all__ = ["BaseEmbeddingsCache", "EmbeddingsCache", "CacheEntry"]
+__all__ = ["EmbeddingsCache", "CacheEntry"]

redisvl/extensions/cache/embeddings/embeddings.py

Lines changed: 16 additions & 50 deletions
@@ -1,10 +1,6 @@
-"""Embeddings cache implementation for RedisVL.
+"""Embeddings cache implementation for RedisVL."""

-This module provides a concrete implementation of the BaseEmbeddingsCache that
-stores and retrieves embedding vectors with exact key matching.
-"""
-
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Mapping, Optional, Tuple, Union

 from redis import Redis

@@ -83,8 +79,18 @@ def _prepare_entry_data(
         model_name: str,
         embedding: List[float],
         metadata: Optional[Dict[str, Any]] = None,
-    ) -> Dict[str, Any]:
-        """Prepare data for storage in Redis"""
+    ) -> Tuple[str, Dict[str, Any]]:
+        """Prepare data for storage in Redis
+
+        Args:
+            text (str): The text input that was embedded.
+            model_name (str): The name of the embedding model.
+            embedding (List[float]): The embedding vector.
+            metadata (Optional[Dict[str, Any]]): Optional metadata.
+
+        Returns:
+            Tuple[str, Dict[str, Any]]: A tuple of (key, entry_data)
+        """
         # Create cache entry with entry_id
         entry_id = self._make_entry_id(text, model_name)
         key = self._make_key(entry_id)
@@ -97,46 +103,6 @@ def _prepare_entry_data(
         )
         return key, entry.to_dict()

-    def clear(self) -> None:
-        """Clear the cache of all keys.
-
-        Removes all entries from the cache that match the cache prefix.
-
-        .. code-block:: python
-
-            cache.clear()
-        """
-        client = self._get_redis_client()
-
-        # Scan for all keys with our prefix
-        cursor = "0"
-        while cursor != 0:
-            cursor, keys = client.scan(
-                cursor=cursor, match=f"{self.prefix}*", count=100
-            )
-            if keys:
-                client.delete(*keys)
-
-    async def aclear(self) -> None:
-        """Async clear the cache of all keys.
-
-        Asynchronously removes all entries from the cache that match the cache prefix.
-
-        .. code-block:: python
-
-            await cache.aclear()
-        """
-        client = await self._get_async_redis_client()
-
-        # Scan for all keys with our prefix
-        cursor = "0"
-        while cursor != 0:
-            cursor, keys = await client.scan(
-                cursor=cursor, match=f"{self.prefix}*", count=100
-            )
-            if keys:
-                await client.delete(*keys)
-
     def get(
         self,
         text: str,
@@ -284,7 +250,7 @@ def set(

         # Store in Redis
         client = self._get_redis_client()
-        client.hset(name=key, mapping=cache_entry)
+        client.hset(name=key, mapping=cache_entry)  # type: ignore

         # Set TTL if specified
         self.expire(key, ttl)
@@ -329,7 +295,7 @@ async def aset(

         # Store in Redis
         client = await self._get_async_redis_client()
-        await client.hset(name=key, mapping=cache_entry)
+        await client.hset(name=key, mapping=cache_entry)  # type: ignore

         # Set TTL if specified
         await self.aexpire(key, ttl)
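
The reworked `_prepare_entry_data` now returns a `(key, entry_dict)` tuple, which is what the `hset` calls in `set`/`aset` consume. For orientation, here is a rough usage sketch of the public `EmbeddingsCache` interface; the argument names follow the RedisVL docs, the return values are assumed, and the three-element vector is purely illustrative.

```python
from redisvl.extensions.cache.embeddings import EmbeddingsCache

cache = EmbeddingsCache(
    name="embedcache",
    redis_url="redis://localhost:6379",  # placeholder connection URL
    ttl=3600,                            # expire entries after one hour
)

# store a precomputed embedding keyed by (text, model_name)
cache.set(
    text="What is semantic caching?",
    model_name="text-embedding-3-small",
    embedding=[0.1, 0.2, 0.3],           # dummy vector for illustration
    metadata={"source": "docs"},
)

# exact-match lookup with the same (text, model_name) pair
entry = cache.get(text="What is semantic caching?", model_name="text-embedding-3-small")
if entry:
    print(entry["embedding"])
```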

redisvl/extensions/cache/embeddings/schema.py

Lines changed: 2 additions & 2 deletions
@@ -23,8 +23,6 @@ class CacheEntry(BaseModel):
     """The name of the embedding model used"""
     embedding: List[float]
     """The embedding vector representation"""
-    dimensions: int
-    """Number of dimensions in the embedding"""
     inserted_at: float = Field(default_factory=current_timestamp)
     """Timestamp of when the entry was added to the cache"""
     metadata: Optional[Dict[str, Any]] = Field(default=None)
@@ -44,6 +42,8 @@ def deserialize_cache_entry(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         ):
             values[EMBEDDING_FIELD_NAME] = deserialize(values[EMBEDDING_FIELD_NAME])

+        return values
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert the cache entry to a dictionary for storage"""
         data = self.model_dump(exclude_none=True)
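
The added `return values` in `deserialize_cache_entry` matters because a Pydantic `mode="before"` model validator hands its return value to the rest of validation; without it the model would try to validate `None` instead of the prepared dict. A minimal standalone illustration of that rule, using a toy model rather than the actual `CacheEntry`:

```python
from typing import Any, Dict, List

from pydantic import BaseModel, model_validator


class ToyEntry(BaseModel):
    embedding: List[float]

    @model_validator(mode="before")
    @classmethod
    def coerce_embedding(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        # Deserialize a string-encoded vector before field validation runs
        if isinstance(values.get("embedding"), str):
            values["embedding"] = [float(x) for x in values["embedding"].split(",")]
        # Omitting this return would make Pydantic validate `None` and fail
        return values


print(ToyEntry(embedding="0.1,0.2,0.3").embedding)  # -> [0.1, 0.2, 0.3]
```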
