Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 20 additions & 2 deletions runtime/caches/lrucache.ts
Original file line number Diff line number Diff line change
Expand Up @@ -58,9 +58,27 @@ function createLruCacheStorage(cacheStorageInner: CacheStorage): CacheStorage {
assertNoOptions(options);
const cacheKey = await requestURLSHA1(request);
if (fileCache.has(cacheKey)) {
const result = cacheInner.match(cacheKey);
return result;
return cacheInner.match(cacheKey);
}
// Lazy re-index: on a cold LRU (e.g. after pod restart), check if the file
// exists on disk. If still valid, re-index into the LRU so eviction is managed
// normally from this point on. If expired, delete the orphaned file and miss.
// TODO: add a background sweep at startup that deletes expired orphaned files
// that are never re-accessed — they accumulate on disk across restarts and are
// never evicted without this sweep.
const response = await cacheInner.match(cacheKey);
if (!response) return undefined;
const expires = response.headers.get("expires");
const length = response.headers.get("content-length");
if (expires && length) {
const ttl = Date.parse(expires) - Date.now() + STALE_TTL_PERIOD;
if (ttl > 0) {
fileCache.set(cacheKey, true, { size: parseInt(length), ttl });
return response;
}
}
// Expired or missing metadata — delete the orphaned file and treat as a miss.
cacheInner.delete(cacheKey).catch(() => {});
return undefined;
Comment on lines +63 to 82
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Lazy re-index logic is sound; fix the CI formatting failure.

The logic correctly handles the cold-LRU scenario: validating TTL with STALE_TTL_PERIOD extension (consistent with put()), re-indexing valid entries, and cleaning up expired/orphaned files.

The CI failure indicates a formatting issue on line 76. Run deno fmt to fix.

🔧 Suggested formatting fix (run `deno fmt` to confirm exact output)
-              fileCache.set(cacheKey, true, { size: parseInt(length, 10), ttl });
+              fileCache.set(cacheKey, true, {
+                size: parseInt(length, 10),
+                ttl,
+              });
🧰 Tools
🪛 GitHub Actions: ci

[error] 76-76: deno fmt --check failed: Found unformatted file(s). Example diff indicates formatting changes around fileCache.set(cacheKey, true, { size: parseInt(length, 10), ttl }).

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@runtime/caches/lrucache.ts` around lines 63 - 82, The CI is failing due to
formatting in the lazy re-index block in runtime/caches/lrucache.ts (the area
using cacheInner.match, response.headers.get("expires"/"content-length"),
STALE_TTL_PERIOD, fileCache.set, and cacheInner.delete); run the formatter (deno
fmt) and commit the resulting changes so the spacing/indentation and line breaks
around that block match project style—no logic changes required, just apply the
automatic formatting to the function containing the lazy re-index code.

},
put: async (
Expand Down
151 changes: 151 additions & 0 deletions runtime/caches/mod.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -137,3 +137,154 @@ Deno.test({
});

// TODO: test the scenario where the response has no content-length header

// ---------------------------------------------------------------------------
// Lazy re-index tests
// ---------------------------------------------------------------------------

// Grace period (ms) added on top of the `expires` header when the LRU cache
// computes an entry's TTL. Mirrors the STALE_TTL_PERIOD used by the cache
// implementation under test; defaults to 30s when the env var is unset.
const STALE_TTL_PERIOD_MS = parseInt(
  Deno.env.get("STALE_TTL_PERIOD") ?? "30000",
  10, // explicit radix — never rely on implicit base detection for env input
);

/**
 * Test helper that produces a CacheStorage whose `open` hands back a Cache
 * view over a *shared* Map. Because the Map outlives any single storage
 * instance, it stands in for on-disk state that survives an LRU "restart".
 */
const sharedMapCacheStorage = (
  map: Map<RequestInfo | URL, Response>,
): CacheStorage => {
  // Normalize Request objects, strings, and URLs to a single string key.
  const keyOf = (request: RequestInfo | URL): string =>
    (request as Request).url ?? request.toString();

  // The Cache returned by `open` reads and writes the shared backing map.
  const openSharedCache = (): Promise<Cache> => {
    const cache = {
      add: NOT_IMPLEMENTED,
      addAll: NOT_IMPLEMENTED,
      keys: NOT_IMPLEMENTED,
      matchAll: NOT_IMPLEMENTED,
      delete: (request: RequestInfo | URL) =>
        Promise.resolve(Boolean(map.delete(keyOf(request)))),
      match: (request: RequestInfo | URL) =>
        Promise.resolve(map.get(keyOf(request))),
      put: (request: RequestInfo | URL, response: Response) => {
        map.set(keyOf(request), response);
        return Promise.resolve();
      },
    } as Cache;
    return Promise.resolve(cache);
  };

  return {
    delete: () => {
      // Storage-level delete wipes the whole backing map (test semantics).
      map.clear();
      return Promise.resolve(true);
    },
    has: NOT_IMPLEMENTED,
    keys: NOT_IMPLEMENTED,
    match: NOT_IMPLEMENTED,
    open: openSharedCache,
  };
};

Deno.test({
  name: "lru_cache_lazy_reindex: valid entry is served after LRU restart",
  sanitizeResources: false,
  sanitizeOps: false,
}, async () => {
  // The shared map plays the role of disk state that survives a restart.
  const disk = new Map<RequestInfo | URL, Response>();

  // First LRU lifetime: write one entry that expires a minute from now.
  const firstLru = await headersCache(lruCache(sharedMapCacheStorage(disk)))
    .open(CACHE_NAME);
  const expiresInOneMinute = new Date(Date.now() + 60_000).toUTCString();
  await firstLru.put(
    createRequest(100),
    new Response("cached-body", {
      headers: { "content-length": "11", expires: expiresInOneMinute },
    }),
  );

  // "Restart": a brand-new LRU layered over the same disk map.
  const secondLru = await headersCache(lruCache(sharedMapCacheStorage(disk)))
    .open(CACHE_NAME);

  // Lazy re-index should recover the still-valid entry from disk.
  assertNotEquals(await secondLru.match(createRequest(100)), undefined);
});

Deno.test({
  name: "lru_cache_lazy_reindex: truly expired entry is evicted on access",
  sanitizeResources: false,
  sanitizeOps: false,
}, async () => {
  const disk = new Map<RequestInfo | URL, Response>();

  // Seed an entry whose expiry is in the past even after the stale grace
  // period is added back, so the lazy re-index must treat it as dead.
  const seedCache = await headersCache(lruCache(sharedMapCacheStorage(disk)))
    .open(CACHE_NAME);
  const longExpired = new Date(Date.now() - STALE_TTL_PERIOD_MS - 60_000)
    .toUTCString();
  await seedCache.put(
    createRequest(200),
    new Response("old-body", {
      headers: { "content-length": "8", expires: longExpired },
    }),
  );

  // New LRU over the same disk: the expired orphan must read as a miss.
  const restartedCache = await headersCache(
    lruCache(sharedMapCacheStorage(disk)),
  ).open(CACHE_NAME);
  assertEquals(await restartedCache.match(createRequest(200)), undefined);
});

Deno.test({
  name: "lru_cache_lazy_reindex: entry missing from disk is a miss",
  sanitizeResources: false,
  sanitizeOps: false,
}, async () => {
  // Nothing was ever written to the shared "disk" map, so there is no file
  // for the lazy re-index path to recover.
  const emptyDisk = new Map<RequestInfo | URL, Response>();
  const cache = await headersCache(lruCache(sharedMapCacheStorage(emptyDisk)))
    .open(CACHE_NAME);

  assertEquals(await cache.match(createRequest(300)), undefined);
});

Deno.test({
  name:
    "lru_cache_lazy_reindex: re-indexed entry stays accessible on subsequent accesses",
  sanitizeResources: false,
  sanitizeOps: false,
}, async () => {
  const disk = new Map<RequestInfo | URL, Response>();

  // Write a valid entry during the first LRU lifetime.
  const originalCache = await headersCache(
    lruCache(sharedMapCacheStorage(disk)),
  ).open(CACHE_NAME);
  const expiresSoon = new Date(Date.now() + 60_000).toUTCString();
  await originalCache.put(
    createRequest(400),
    new Response("repeat-body", {
      headers: { "content-length": "11", expires: expiresSoon },
    }),
  );

  // Restart: fresh LRU over the same backing disk.
  const restarted = await headersCache(lruCache(sharedMapCacheStorage(disk)))
    .open(CACHE_NAME);

  // Access once to trigger the lazy re-index…
  assertNotEquals(await restarted.match(createRequest(400)), undefined);
  // …then again: the LRU itself should now answer without re-indexing.
  assertNotEquals(await restarted.match(createRequest(400)), undefined);
});
Loading