Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions backend/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,3 +100,21 @@ async def auth_middleware(request: Request, call_next):
@app.get("/api/health")
async def health():
    """Liveness probe: always reports the service as up."""
    payload = {"status": "ok"}
    return payload


@app.post("/api/admin/cache/clear")
async def clear_caches():
    """Drop every in-memory cache (FastF1 sessions and replay frames) and
    run the garbage collector so freed memory is reclaimed promptly.

    Returns a summary with how many entries each cache released.
    """
    # Imports are kept function-local, matching the original
    # (presumably to avoid import cycles with the routers — confirm).
    import gc
    from services.f1_data import clear_session_cache
    from routers.replay import clear_replay_cache

    result = {"status": "ok"}
    # Clear order preserved: sessions first, then replay frames.
    result["sessions_cleared"] = clear_session_cache()
    result["replay_entries_cleared"] = clear_replay_cache()
    gc.collect()
    return result
37 changes: 28 additions & 9 deletions backend/routers/replay.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
import asyncio
import gc
import logging
import math
import os
import re
from collections import OrderedDict

from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Query
from services.storage import get_json
Expand All @@ -10,8 +13,9 @@
logger = logging.getLogger(__name__)
router = APIRouter(tags=["replay"])

# In-memory cache for replay frames loaded from R2
_replay_cache: dict[str, list[dict]] = {}
# In-memory LRU cache for replay frames (evicts oldest when exceeding limit)
_REPLAY_CACHE_MAX = int(os.environ.get("REPLAY_CACHE_MAX", "3"))
_replay_cache: OrderedDict[str, list[dict]] = OrderedDict()

# In-memory cache for pit loss data
_pit_loss_cache: dict | None = None
Expand Down Expand Up @@ -146,16 +150,31 @@ def _sanitize_frame(frame: dict) -> dict:

def _get_frames_sync(year: int, round_num: int, session_type: str) -> list[dict]:
    """Return sanitized replay frames for a session, serving from the LRU cache.

    On a hit, the key is marked most-recently-used. On a miss, frames are
    fetched from R2 storage (missing object -> empty list), sanitized in
    place, inserted, and the oldest entries are evicted until the cache is
    within _REPLAY_CACHE_MAX entries.

    NOTE(review): the scraped diff had interleaved the pre-change body (plain
    dict insert, no eviction) with the post-change LRU body, leaving the
    eviction loop unreachable; this is the coherent LRU version.
    """
    key = f"{year}_{round_num}_{session_type}"
    if key in _replay_cache:
        # Cache hit: refresh recency so this entry is evicted last.
        _replay_cache.move_to_end(key)
        return _replay_cache[key]
    frames = get_json(f"sessions/{year}/{round_num}/{session_type}/replay.json")
    if frames is None:
        # Missing replay object in storage: cache an empty list so we do not
        # re-fetch on every request.
        frames = []
    for f in frames:
        _sanitize_frame(f)
    _replay_cache[key] = frames
    # Evict oldest entries if over limit
    while len(_replay_cache) > _REPLAY_CACHE_MAX:
        evicted_key, _ = _replay_cache.popitem(last=False)
        logger.info(f"Replay cache evicted: {evicted_key} (limit={_REPLAY_CACHE_MAX})")
    return _replay_cache[key]


def clear_replay_cache():
    """Empty the replay-frame LRU cache, collect garbage, and return the
    number of entries that were dropped."""
    evicted = len(_replay_cache)
    _replay_cache.clear()
    gc.collect()
    logger.info(f"Replay cache cleared ({evicted} entries evicted)")
    return evicted


async def _get_frames(year: int, round_num: int, session_type: str) -> list[dict]:
    """Async wrapper: run the blocking cache/R2 frame lookup in a worker
    thread so the event loop is not stalled."""
    frames = await asyncio.to_thread(_get_frames_sync, year, round_num, session_type)
    return frames

Expand Down
25 changes: 23 additions & 2 deletions backend/services/f1_data.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
from __future__ import annotations

import asyncio
import gc
import os
import logging
import threading
from collections import OrderedDict
from datetime import datetime, timezone
from functools import lru_cache

Expand All @@ -27,8 +29,9 @@
os.makedirs(CACHE_DIR, exist_ok=True)
fastf1.Cache.enable_cache(CACHE_DIR)

# In-memory cache for loaded sessions (with lock to prevent concurrent duplicate loads)
_session_cache: dict[str, fastf1.core.Session] = {}
# In-memory LRU cache for loaded sessions (evicts oldest when exceeding limit)
_SESSION_CACHE_MAX = int(os.environ.get("SESSION_CACHE_MAX", "2"))
_session_cache: OrderedDict[str, fastf1.core.Session] = OrderedDict()
_session_lock = threading.Lock()


Expand Down Expand Up @@ -189,11 +192,13 @@ async def get_season_events(year: int) -> list[dict]:
def _load_session(year: int, round_num: int, session_type: str) -> fastf1.core.Session:
key = _cache_key(year, round_num, session_type)
if key in _session_cache:
_session_cache.move_to_end(key)
return _session_cache[key]

with _session_lock:
# Double-check after acquiring lock
if key in _session_cache:
_session_cache.move_to_end(key)
return _session_cache[key]

logger.info(f"Loading session {year}/{round_num}/{session_type} from FastF1...")
Expand All @@ -208,11 +213,27 @@ def _load_session(year: int, round_num: int, session_type: str) -> fastf1.core.S
# Only cache if we actually got meaningful data
if len(session.laps) > 0:
_session_cache[key] = session
# Evict oldest entries if over limit
while len(_session_cache) > _SESSION_CACHE_MAX:
evicted_key, _ = _session_cache.popitem(last=False)
logger.info(f"Session cache evicted: {evicted_key} (limit={_SESSION_CACHE_MAX})")
gc.collect()

logger.info(f"Session {year}/{round_num}/{session_type} loaded.")
return session


def clear_session_cache():
    """Drop every cached FastF1 session (and the availability cache),
    collect garbage, and return how many sessions were evicted.

    Holds the session lock so a concurrent load cannot interleave with
    the clear.
    """
    with _session_lock:
        evicted = len(_session_cache)
        _session_cache.clear()
        _availability_cache.clear()
        gc.collect()
    logger.info(f"Session cache cleared ({evicted} sessions evicted)")
    return evicted


def _get_session_info_sync(year: int, round_num: int, session_type: str = "R") -> dict:
session = _load_session(year, round_num, session_type)
drivers = []
Expand Down
Loading