Skip to content

Commit 08e8b97

Browse files
refactor: replace lru_cache with async_lru for improved caching and performance; remove unused streams endpoint; update manifest to include dynamic catalogs
1 parent d401a30 commit 08e8b97

File tree

8 files changed

+65
-55
lines changed

8 files changed

+65
-55
lines changed

app/api/endpoints/manifest.py

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
from async_lru import alru_cache
2+
from fastapi import Response
13
from fastapi.routing import APIRouter
24

35
from app.core.config import settings
@@ -15,10 +17,7 @@ def get_base_manifest():
1517
"name": settings.ADDON_NAME,
1618
"description": "Movie and series recommendations based on your Stremio library",
1719
"logo": "https://raw.githubusercontent.com/TimilsinaBimal/Watchly/refs/heads/main/static/logo.png",
18-
"resources": [
19-
{"name": "catalog", "types": ["movie", "series"], "idPrefixes": ["tt"]},
20-
{"name": "stream", "types": ["movie", "series"], "idPrefixes": ["tt"]},
21-
],
20+
"resources": [{"name": "catalog", "types": ["movie", "series"], "idPrefixes": ["tt"]}],
2221
"types": ["movie", "series"],
2322
"idPrefixes": ["tt"],
2423
"catalogs": [
@@ -29,32 +28,40 @@ def get_base_manifest():
2928
}
3029

3130

31+
# Catalog definitions are cached per token for one hour (3600s).
@alru_cache(maxsize=1000, ttl=3600)
async def fetch_catalogs(token: str | None = None):
    """Resolve the extra dynamic catalogs for a credential token.

    Returns an empty list when no token is supplied. Results are cached
    per token so the expensive library fetch runs at most once an hour.
    """
    if not token:
        return []

    credentials = await resolve_user_credentials(token)
    service = StremioService(
        username=credentials.get("username") or "",
        password=credentials.get("password") or "",
        auth_key=credentials.get("authKey"),
    )

    # Fetching the whole library is expensive, but it determines *which*
    # genre catalogs should be offered to this user.
    items = await service.get_library_items()
    catalog_service = DynamicCatalogService(stremio_service=service)

    # These are the *extra* dynamic catalogs; the base ones already live
    # in the static manifest.
    result = await catalog_service.get_watched_loved_catalogs(library_items=items)
    result += await catalog_service.get_genre_based_catalogs(library_items=items)
    return result
4652

4753

4854
@router.get("/manifest.json")
4955
@router.get("/{token}/manifest.json")
50-
async def manifest(token: str | None = None):
56+
async def manifest(response: Response, token: str | None = None):
5157
"""Stremio manifest endpoint with optional credential token in the path."""
5258
# Cache manifest for 1 day (86400 seconds)
53-
# response.headers["Cache-Control"] = "public, max-age=86400"
59+
response.headers["Cache-Control"] = "public, max-age=86400"
5460

5561
base_manifest = get_base_manifest()
5662
if token:
5763
catalogs = await fetch_catalogs(token)
5864
if catalogs:
65+
# Append dynamic catalogs to the base ones
5966
base_manifest["catalogs"] += catalogs
6067
return base_manifest

app/api/endpoints/streams.py

Lines changed: 0 additions & 31 deletions
This file was deleted.

app/api/main.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
from .endpoints.catalogs import router as catalogs_router
44
from .endpoints.health import router as health_router
55
from .endpoints.manifest import router as manifest_router
6-
from .endpoints.streams import router as streams_router
76
from .endpoints.tokens import router as tokens_router
87

98
api_router = APIRouter()
@@ -16,6 +15,5 @@ async def root():
1615

1716
api_router.include_router(manifest_router)
1817
api_router.include_router(catalogs_router)
19-
api_router.include_router(streams_router)
2018
api_router.include_router(tokens_router)
2119
api_router.include_router(health_router)

app/services/tmdb_service.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
1-
from functools import lru_cache
2-
31
import httpx
2+
from async_lru import alru_cache
43
from loguru import logger
54

65
from app.core.config import settings
@@ -46,7 +45,9 @@ async def close(self):
4645
await self._addon_client.aclose()
4746
self._addon_client = None
4847

49-
@lru_cache(maxsize=1000)
48+
# Increase cache size significantly as this is a hot path for recommendations
49+
# 5000 items * ~1KB per item = ~5MB memory, very safe
50+
@alru_cache(maxsize=5000)
5051
async def get_addon_meta(self, type: str, id: str) -> dict:
5152
"""Get addon metadata for a specific type and ID."""
5253
url = f"{self.addon_url}/meta/{type}/{id}.json"
@@ -87,7 +88,7 @@ async def _make_request(self, endpoint: str, params: dict | None = None) -> dict
8788
logger.error(f"TMDB API request error for {endpoint}: {e}")
8889
raise
8990

90-
@lru_cache(maxsize=1000)
91+
@alru_cache(maxsize=2000)
9192
async def find_by_imdb_id(self, imdb_id: str) -> tuple[int | None, str | None]:
9293
"""Find TMDB ID and type by IMDB ID."""
9394
try:
@@ -128,33 +129,33 @@ async def find_by_imdb_id(self, imdb_id: str) -> tuple[int | None, str | None]:
128129
logger.warning(f"Unexpected error finding TMDB ID for IMDB {imdb_id}: {e}")
129130
return None, None
130131

131-
@alru_cache(maxsize=1000)
async def get_movie_details(self, movie_id: int) -> dict:
    """Get details of a specific movie with credits and external IDs."""
    return await self._make_request(
        f"/movie/{movie_id}",
        params={"append_to_response": "credits,external_ids"},
    )
136137

137-
@alru_cache(maxsize=1000)
async def get_tv_details(self, tv_id: int) -> dict:
    """Get details of a specific TV series with credits and external IDs."""
    return await self._make_request(
        f"/tv/{tv_id}",
        params={"append_to_response": "credits,external_ids"},
    )
142143

143-
@alru_cache(maxsize=1000)
async def get_recommendations(self, tmdb_id: int, media_type: str, page: int = 1) -> dict:
    """Get recommendations based on TMDB ID and media type."""
    return await self._make_request(
        f"/{media_type}/{tmdb_id}/recommendations",
        params={"page": page},
    )
149150

150-
@alru_cache(maxsize=1000)
async def get_similar(self, tmdb_id: int, media_type: str, page: int = 1) -> dict:
    """Get similar content based on TMDB ID and media type."""
    return await self._make_request(
        f"/{media_type}/{tmdb_id}/similar",
        params={"page": page},
    )
156157

157-
@lru_cache(maxsize=1000)
158+
@alru_cache(maxsize=1000)
158159
async def get_discover(self, media_type: str, params: dict[str, str]) -> dict:
159160
"""Get discover content based on params."""
160161
media_type = "movie" if media_type == "movie" else "tv"

app/services/token_store.py

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from typing import Any
77

88
import redis.asyncio as redis
9+
from cachetools import TTLCache
910
from cryptography.fernet import Fernet, InvalidToken
1011
from loguru import logger
1112

@@ -20,6 +21,9 @@ class TokenStore:
2021
def __init__(self) -> None:
2122
self._client: redis.Redis | None = None
2223
self._cipher: Fernet | None = None
24+
# Cache decrypted payloads for 1 day (86400s) to reduce Redis hits
25+
# Max size 5000 allows many active users without eviction
26+
self._payload_cache: TTLCache = TTLCache(maxsize=5000, ttl=86400)
2327

2428
if not settings.REDIS_URL:
2529
logger.warning("REDIS_URL is not set. Token storage will fail until a Redis instance is configured.")
@@ -93,15 +97,22 @@ async def store_payload(self, payload: dict[str, Any]) -> tuple[str, bool]:
9397
if settings.TOKEN_TTL_SECONDS and settings.TOKEN_TTL_SECONDS > 0:
9498
await client.setex(key, settings.TOKEN_TTL_SECONDS, encrypted_value)
9599
logger.info(
96-
"Stored encrypted credential payload with TTL %s seconds",
97-
settings.TOKEN_TTL_SECONDS,
100+
f"Stored encrypted credential payload with TTL {settings.TOKEN_TTL_SECONDS} seconds",
98101
)
99102
else:
100103
await client.set(key, encrypted_value)
101104
logger.info("Stored encrypted credential payload without expiration")
105+
106+
# Cache the new payload immediately to avoid next-read hit
107+
self._payload_cache[token] = normalized
108+
102109
return token, not bool(existing)
103110

104111
async def get_payload(self, token: str) -> dict[str, Any] | None:
112+
# Check local LRU cache first
113+
if token in self._payload_cache:
114+
return self._payload_cache[token]
115+
105116
hashed = self._hash_token(token)
106117
key = self._format_key(hashed)
107118
client = await self._get_client()
@@ -113,7 +124,11 @@ async def get_payload(self, token: str) -> dict[str, Any] | None:
113124
try:
114125
# Decrypt -> JSON Decode
115126
decrypted_json = self._get_cipher().decrypt(encrypted_raw.encode()).decode("utf-8")
116-
return json.loads(decrypted_json)
127+
payload = json.loads(decrypted_json)
128+
129+
# Cache for subsequent reads
130+
self._payload_cache[token] = payload
131+
return payload
117132
except (InvalidToken, json.JSONDecodeError, UnicodeDecodeError):
118133
logger.warning("Failed to decrypt or decode cached payload for token. Key might have changed.")
119134
return None
@@ -124,12 +139,16 @@ async def delete_token(self, token: str) -> None:
124139
client = await self._get_client()
125140
await client.delete(key)
126141

142+
# Invalidate local cache
143+
if token in self._payload_cache:
144+
del self._payload_cache[token]
145+
127146
async def iter_payloads(self) -> AsyncIterator[tuple[str, dict[str, Any]]]:
128147
"""Iterate over all stored payloads, yielding key and payload."""
129148
try:
130149
client = await self._get_client()
131150
except (redis.RedisError, OSError) as exc:
132-
logger.warning("Skipping credential iteration; Redis unavailable: %s", exc)
151+
logger.warning(f"Skipping credential iteration; Redis unavailable: {exc}")
133152
return
134153

135154
pattern = f"{self.KEY_PREFIX}*"
@@ -140,7 +159,7 @@ async def iter_payloads(self) -> AsyncIterator[tuple[str, dict[str, Any]]]:
140159
try:
141160
encrypted_raw = await client.get(key)
142161
except (redis.RedisError, OSError) as exc:
143-
logger.warning("Failed to fetch payload for %s: %s", key, exc)
162+
logger.warning(f"Failed to fetch payload for {key}: {exc}")
144163
continue
145164

146165
if encrypted_raw is None:
@@ -150,12 +169,12 @@ async def iter_payloads(self) -> AsyncIterator[tuple[str, dict[str, Any]]]:
150169
decrypted_json = cipher.decrypt(encrypted_raw.encode()).decode("utf-8")
151170
payload = json.loads(decrypted_json)
152171
except (InvalidToken, json.JSONDecodeError, UnicodeDecodeError):
153-
logger.warning("Failed to decrypt payload for key %s. Skipping.", key)
172+
logger.warning(f"Failed to decrypt payload for key {key}. Skipping.")
154173
continue
155174

156175
yield key, payload
157176
except (redis.RedisError, OSError) as exc:
158-
logger.warning("Failed to scan credential tokens: %s", exc)
177+
logger.warning(f"Failed to scan credential tokens: {exc}")
159178

160179

161180
token_store = TokenStore()

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ description = "Add your description here"
55
readme = "README.md"
66
requires-python = ">=3.10"
77
dependencies = [
8+
"async-lru>=2.0.5",
89
"cachetools>=6.2.2",
910
"cryptography>=46.0.3",
1011
"fastapi>=0.104.1",

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,3 +7,4 @@ loguru>=0.7.2
77
cachetools>=6.2.2
88
redis>=5.0.1
99
cryptography>=41.0.0
10+
async-lru>=2.0.4

uv.lock

Lines changed: 14 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)