2 changes: 1 addition & 1 deletion app/api/endpoints/catalogs.py
@@ -83,7 +83,7 @@ async def get_catalog(type: str, id: str, response: Response, token: str):

logger.info(f"Returning {len(recommendations)} items for {type}")
# Cache catalog responses for 4 hours
response.headers["Cache-Control"] = "public, max-age=14400"
response.headers["Cache-Control"] = "public, max-age=14400" if len(recommendations) > 0 else "no-cache"
return {"metas": recommendations}

except HTTPException:
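A quick illustration of the new header logic (a sketch, not code from the PR): an empty recommendation list now opts out of caching, so a cached empty catalog is no longer served for four hours.

    recommendations: list[dict] = []
    # Same conditional as the diff above: cache only when there is something to cache.
    cache_header = "public, max-age=14400" if len(recommendations) > 0 else "no-cache"
    print(cache_header)  # "no-cache"; a non-empty list yields the 4-hour cache header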
4 changes: 4 additions & 0 deletions app/core/config.py
@@ -36,6 +36,10 @@ class Settings(BaseSettings):

RECOMMENDATION_SOURCE_ITEMS_LIMIT: int = 10

# AI
DEFAULT_GEMINI_MODEL: str = "gemma-3-27b-it"
GEMINI_API_KEY: str | None = None


settings = Settings()

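A minimal usage sketch for the new settings (names mirror the diff; the environment-variable handling via pydantic-settings is assumed to follow the existing Settings behaviour, and the key value is illustrative only):

    import os

    os.environ["GEMINI_API_KEY"] = "example-key"  # normally set by the deployment, never hard-coded

    from app.core.config import Settings

    settings = Settings()
    print(settings.DEFAULT_GEMINI_MODEL)        # "gemma-3-27b-it"
    print(settings.GEMINI_API_KEY is not None)  # True once the key is configured; unset disables Gemini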
6 changes: 6 additions & 0 deletions app/services/catalog_updater.py
@@ -13,6 +13,7 @@
from app.services.catalog import DynamicCatalogService
from app.services.stremio_service import StremioService
from app.services.token_store import token_store
from app.services.translation import translation_service

# Max number of concurrent updates to prevent overwhelming external APIs
MAX_CONCURRENT_UPDATES = 5
@@ -50,6 +51,11 @@ async def refresh_catalogs_for_credentials(token: str, credentials: dict[str, An
catalogs = await dynamic_catalog_service.get_dynamic_catalogs(
library_items=library_items, user_settings=user_settings
)

if user_settings and user_settings.language:
for cat in catalogs:
if name := cat.get("name"):
cat["name"] = await translation_service.translate(name, user_settings.language)
logger.info(f"[{redact_token(token)}] Prepared {len(catalogs)} catalogs")
return await stremio_service.update_catalogs(catalogs, auth_key)
except Exception as e:
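One possible refinement (an assumption, not part of this PR): the loop above awaits one translation at a time. If translation_service.translate is safe to call concurrently, the catalog names could be translated in parallel; translate_catalog_names below is a hypothetical helper illustrating that:

    import asyncio

    from app.services.translation import translation_service


    async def translate_catalog_names(catalogs: list[dict], language: str) -> list[dict]:
        # Translate every named catalog concurrently instead of one await per row.
        named = [cat for cat in catalogs if cat.get("name")]
        translated = await asyncio.gather(
            *(translation_service.translate(cat["name"], language) for cat in named)
        )
        for cat, new_name in zip(named, translated):
            cat["name"] = new_name
        return catalogs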
65 changes: 65 additions & 0 deletions app/services/gemini.py
@@ -0,0 +1,65 @@
from google import genai
from loguru import logger
from pydantic import BaseModel, Field

from app.core.config import settings

# class CatalogRow(BaseModel):
# name: str = Field(description="Name of the catalog/row.")
# translated_name: str = Field(description="Translated name of the catalog/row.")


class Catalog(BaseModel):
title: str = Field(description="Simplified title of the catalog")


class GeminiService:
def __init__(self, model: str = settings.DEFAULT_GEMINI_MODEL):
self.model = model
self.client = None
if api_key := settings.GEMINI_API_KEY:
try:
self.client = genai.Client(api_key=api_key)
except Exception as e:
logger.warning(f"Failed to initialize Gemini client: {e}")
else:
logger.warning("GEMINI_API_KEY not set. Gemini features will be disabled.")

@staticmethod
def get_prompt():
return """
You are a content catalog naming expert.
Given filters like genre, keywords, countries, or years, generate natural,
engaging catalog row titles that streaming platforms would use.
Examples:
- Genre: Action, Country: South Korea → "Korean Action Thrillers"
- Keyword: "space", Genre: Sci-Fi → "Space Exploration Adventures"
- Genre: Drama, Country: France → "Acclaimed French Cinema"
- Country: "USA" + Genre: "Sci-Fi and Fantasy" → "Hollywood Sci-Fi and Fantasy"
- Keywords: "revenge" + "martial arts" → "Revenge & Martial Arts"
Keep titles:
- Short (2-5 words)
- Natural and engaging
- Focused on what makes the content appealing
- Only return a single best title and nothing else.
"""

def generate_content(self, prompt: str) -> str:
system_prompt = self.get_prompt()
if not self.client:
logger.warning("Gemini client not initialized. Gemini features will be disabled.")
return ""
try:
response = self.client.models.generate_content(
model=self.model,
contents=system_prompt + "\n\n" + prompt,
)
return response.text.strip()
except Exception as e:
logger.error(f"Error generating content: {e}")
return ""


gemini_service = GeminiService()
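A usage sketch for the module above (the prompt format mirrors the examples embedded in get_prompt(); an empty string signals that Gemini is disabled or the call failed):

    from app.services.gemini import gemini_service

    title = gemini_service.generate_content("Genre: Action + Country: South Korea")
    if title:
        print(f"AI title: {title}")  # e.g. "Korean Action Thrillers"
    else:
        print("Gemini unavailable - fall back to template-based titles")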
79 changes: 57 additions & 22 deletions app/services/row_generator.py
@@ -3,6 +3,7 @@
from pydantic import BaseModel

from app.models.profile import UserTasteProfile
from app.services.gemini import gemini_service
from app.services.tmdb.countries import COUNTRY_ADJECTIVES
from app.services.tmdb.genre import movie_genres, series_genres
from app.services.tmdb_service import TMDBService
@@ -62,38 +63,66 @@ def get_cname(code):
return random.choice(adjectives)
return ""

# Strategy 1: Pure Keyword Row (Top Priority)
if top_keywords:
k_id = top_keywords[0][0]
kw_name = await self._get_keyword_name(k_id)
if kw_name:
# Strategy 1: Combined Keyword Row (Top Priority)
if len(top_keywords) >= 2:
k_id1, k_id2 = top_keywords[0][0], top_keywords[1][0]
kw_name1 = await self._get_keyword_name(k_id1)
kw_name2 = await self._get_keyword_name(k_id2)
title = ""
if kw_name1 and kw_name2:
title = gemini_service.generate_content(f"Keywords: {kw_name1} + {kw_name2}")

if title:
rows.append(
RowDefinition(
title=title,
id=f"watchly.theme.k{k_id1}.k{k_id2}",
keywords=[k_id1, k_id2],
)
)
elif kw_name1:
rows.append(
RowDefinition(
title=f"{normalize_keyword(kw_name)}",
id=f"watchly.theme.k{k_id}",
keywords=[k_id],
title=normalize_keyword(kw_name1),
id=f"watchly.theme.k{k_id1}",
keywords=[k_id1],
)
)
elif top_keywords:
k_id1 = top_keywords[0][0]
kw_name1 = await self._get_keyword_name(k_id1)
if kw_name1:
rows.append(
RowDefinition(
title=normalize_keyword(kw_name1),
id=f"watchly.theme.k{k_id1}",
keywords=[k_id1],
)
)

# Strategy 2: Keyword + Genre (Specific Niche)
if top_genres and len(top_keywords) > 1:
if top_genres and len(top_keywords) > 2:
g_id = top_genres[0][0]
# pick a random keyword to surprise the user on every refresh
k_id = random.choice(top_keywords[1:])[0]
k_id = random.choice(top_keywords[2:])[0]

if k_id:
kw_name = await self._get_keyword_name(k_id)
if kw_name:
title = f"{normalize_keyword(kw_name)} {get_gname(g_id)}"
# keyword and genre can have same name sometimes, remove if so
words = title.split()
seen_words = set()
unique_words = []
for word in words:
if word not in seen_words:
unique_words.append(word)
seen_words.add(word)
title = " ".join(unique_words)
title = gemini_service.generate_content(
f"Genre: {get_gname(g_id)} + Keyword: {normalize_keyword(kw_name)}"
)
if not title:
title = f"{get_gname(g_id)} {normalize_keyword(kw_name)}"
# keyword and genre can have same name sometimes, remove if so
words = title.split()
seen_words = set()
unique_words = []
for word in words:
if word not in seen_words:
unique_words.append(word)
seen_words.add(word)
title = " ".join(unique_words)

rows.append(
RowDefinition(
@@ -110,9 +139,12 @@ def get_cname(code):
c_code = top_countries[0][0]
c_adj = get_cname(c_code)
if c_adj:
title = gemini_service.generate_content(f"Genre: {get_gname(g_id)} + Country: {c_adj}")
if not title:
title = f"{get_gname(g_id)} {c_adj}"
rows.append(
RowDefinition(
title=f"{c_adj} {get_gname(g_id)}",
title=title,
id=f"watchly.theme.g{g_id}.ct{c_code}", # ct for country
genres=[g_id],
country=c_code,
@@ -130,9 +162,12 @@ def get_cname(code):
# # Only do this if decade is valid and somewhat old (nostalgia factor)
if 1970 <= decade_start <= 2010:
decade_str = str(decade_start)[2:] + "s" # "90s"
title = gemini_service.generate_content(f"Genre: {get_gname(g_id)} + Era: {decade_str}")
if not title:
title = f"{get_gname(g_id)} {decade_str}"
rows.append(
RowDefinition(
title=f"{decade_str} {get_gname(g_id)}",
title=title,
id=f"watchly.theme.g{g_id}.y{decade_start}",
genres=[g_id],
year_range=(decade_start, decade_start + 9),
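Side note on the word-deduplication block in Strategy 2 (a suggestion, not code from the diff): dict.fromkeys preserves insertion order, so the five-line loop could be condensed into a small helper such as the hypothetical dedupe_words below:

    def dedupe_words(title: str) -> str:
        # Drop repeated words while keeping their first-seen order.
        return " ".join(dict.fromkeys(title.split()))


    print(dedupe_words("Action Action Thrillers"))  # "Action Thrillers"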
1 change: 1 addition & 0 deletions pyproject.toml
Expand Up @@ -11,6 +11,7 @@ dependencies = [
"cryptography>=46.0.3",
"deep-translator>=1.11.4",
"fastapi>=0.104.1",
"google-genai>=1.54.0",
"httpx>=0.25.2",
"loguru>=0.7.2",
"pydantic>=2.5.0",
73 changes: 14 additions & 59 deletions requirements.txt
@@ -1,59 +1,14 @@
annotated-doc==0.0.4
annotated-types==0.7.0
anyio==4.11.0
apscheduler==3.11.1
async-lru==2.0.5
async-timeout==5.0.1
beautifulsoup4==4.14.3
black==25.11.0
cachetools==6.2.2
certifi==2025.11.12
cffi==2.0.0
cfgv==3.5.0
charset-normalizer==3.4.4
click==8.3.1
cryptography==46.0.3
deep-translator==1.11.4
distlib==0.4.0
exceptiongroup==1.3.0
fastapi==0.121.2
filelock==3.20.0
flake9==3.8.3.post2
h11==0.16.0
httpcore==1.0.9
httptools==0.7.1
httpx==0.28.1
identify==2.6.15
idna==3.11
loguru==0.7.3
mccabe==0.6.1
mypy-extensions==1.1.0
nodeenv==1.9.1
packaging==25.0
pathspec==0.12.1
platformdirs==4.5.0
pre-commit==4.4.0
pycodestyle==2.6.0
pycparser==2.23
pydantic==2.12.4
pydantic-core==2.41.5
pydantic-settings==2.12.0
pyflakes==2.2.0
python-dotenv==1.2.1
pytokens==0.3.0
pyyaml==6.0.3
redis==7.1.0
requests==2.32.5
sniffio==1.3.1
soupsieve==2.8
starlette==0.49.3
tomli==2.3.0
typing-extensions==4.15.0
typing-inspection==0.4.2
tzlocal==5.3.1
urllib3==2.6.0
uvicorn==0.38.0
uvloop==0.22.1
virtualenv==20.35.4
watchfiles==1.1.1
websockets==15.0.1
apscheduler>=3.11.1
async-lru>=2.0.5
cachetools>=6.2.2
cryptography>=46.0.3
deep-translator>=1.11.4
fastapi>=0.104.1
google-genai>=1.54.0
httpx>=0.25.2
loguru>=0.7.2
pydantic>=2.5.0
pydantic-settings>=2.1.0
redis>=5.0.1
tomli>=2.3.0
uvicorn[standard]>=0.24.0