Skip to content

Commit cdc482e

Browse files
committed
release v0-3-0: improve package.json for esm module resolution; add more tests
1 parent f82bc04 commit cdc482e

File tree

3 files changed: +320 −21 lines changed

3 files changed: +320 −21 lines changed

package.json

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,19 @@
11
{
22
"name": "thumbdrive",
3-
"version": "0.2.0",
3+
"version": "0.3.0",
44
"description": "An OPFS wrapper offering sync access and multi-tab support",
55
"type": "module",
66
"main": "dist/index.js",
7+
"module": "dist/index.js",
78
"types": "dist/index.d.ts",
9+
"exports": {
10+
".": {
11+
"types": "./dist/index.d.ts",
12+
"import": "./dist/index.js",
13+
"default": "./dist/index.js"
14+
}
15+
},
16+
"sideEffects": false,
817
"repository": "gadget-inc/thumbdrive",
918
"author": "Gadget Software Inc.",
1019
"license": "MIT",

spec/e2e/opfs-broker.spec.ts

Lines changed: 286 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,12 @@ declare global {
1414
}
1515
}
1616

17-
function url(ns: string, arena: string) {
18-
return `/testapp/index.html?ns=${encodeURIComponent(ns)}&arena=${encodeURIComponent(arena)}`;
17+
function url(ns: string, arena: string, options?: { timeout?: number }) {
18+
let u = `/testapp/index.html?ns=${encodeURIComponent(ns)}&arena=${encodeURIComponent(arena)}`;
19+
if (options?.timeout !== undefined) {
20+
u += `&timeout=${options.timeout}`;
21+
}
22+
return u;
1923
}
2024

2125
async function setupPage(page: Page) {
@@ -216,3 +220,283 @@ test("leader failover: when leader stops, another becomes leader and RPCs contin
216220

217221
await context.close();
218222
});
223+
224+
test("leader crash: abrupt tab close triggers failover and RPCs recover", async ({ browser }) => {
225+
const context = await browser.newContext();
226+
const [a, b] = await Promise.all([context.newPage(), context.newPage()]);
227+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
228+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
229+
230+
[a, b].forEach(setupPage);
231+
await Promise.all([a.goto(url(ns, arena)), b.goto(url(ns, arena))]);
232+
await Promise.all([waitForReady(a), waitForReady(b)]);
233+
await Promise.all([startCandidate(a), startCandidate(b)]);
234+
235+
await expect
236+
.poll(async () => ((await isLeader(a)) ? 1 : 0) + ((await isLeader(b)) ? 1 : 0), { timeout: 5000 })
237+
.toBe(1);
238+
239+
const leaderPage = (await isLeader(a)) ? a : b;
240+
const followerPage = leaderPage === a ? b : a;
241+
242+
// Write a file before crash
243+
await writeFile(leaderPage, "/pre-crash.txt", "before");
244+
await expect.poll(() => readFile(followerPage, "/pre-crash.txt"), { timeout: 5000 }).toBe("before");
245+
246+
// Crash the leader (abrupt close, no graceful shutdown)
247+
await leaderPage.close();
248+
249+
// Follower should become the new leader
250+
await expect.poll(() => isLeader(followerPage), { timeout: 10000 }).toBe(true);
251+
252+
// RPCs should work on the new leader
253+
await writeFile(followerPage, "/post-crash.txt", "after");
254+
await expect.poll(() => readFile(followerPage, "/post-crash.txt"), { timeout: 5000 }).toBe("after");
255+
256+
await context.close();
257+
});
258+
259+
test("follower request timeout when no leader responds", async ({ browser }) => {
260+
const context = await browser.newContext();
261+
const lockHolder = await context.newPage();
262+
const follower = await context.newPage();
263+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
264+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
265+
266+
setupPage(lockHolder);
267+
setupPage(follower);
268+
269+
// Navigate lock holder to same origin so it can hold the Web Lock
270+
await lockHolder.goto(url(ns, arena));
271+
await waitForReady(lockHolder);
272+
273+
// Hold the ns-scoped Web Lock, preventing any broker from becoming leader
274+
const lockName = `opfs-worker-lock-${ns}`;
275+
await lockHolder.evaluate((name: string) => {
276+
return new Promise<void>((resolve) => {
277+
navigator.locks.request(name, () => {
278+
resolve(); // signal that we have the lock
279+
return new Promise(() => {}); // hold it forever
280+
});
281+
});
282+
}, lockName);
283+
284+
// Start the follower with a short timeout (1s)
285+
await follower.goto(url(ns, arena, { timeout: 1000 }));
286+
await waitForReady(follower);
287+
await startCandidate(follower);
288+
289+
// Follower should not be leader
290+
await expect.poll(() => isLeader(follower), { timeout: 2000 }).toBe(false);
291+
292+
// Try to write — should fail since no leader exists to handle the request
293+
const error = await follower.evaluate(async () => {
294+
try {
295+
await window.thumbdriveTest.writeFile("/timeout-test.txt", "should-fail");
296+
return null;
297+
} catch (e: any) {
298+
return e.message || String(e);
299+
}
300+
});
301+
302+
expect(error).toBeTruthy();
303+
expect(error).toContain("timeout");
304+
305+
await context.close();
306+
});
307+
308+
test("restart cycle: stop and start preserves OPFS data", async ({ page }) => {
309+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
310+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
311+
312+
setupPage(page);
313+
await page.goto(url(ns, arena));
314+
await waitForReady(page);
315+
await startCandidate(page);
316+
await expect.poll(() => isLeader(page), { timeout: 5000 }).toBe(true);
317+
318+
// Write a file
319+
await writeFile(page, "/restart-test.txt", "before-restart");
320+
await expect.poll(() => readFile(page, "/restart-test.txt"), { timeout: 5000 }).toBe("before-restart");
321+
322+
// Shutdown gracefully
323+
await shutdownLeader(page);
324+
await expect.poll(() => isLeader(page), { timeout: 5000 }).toBe(false);
325+
326+
// Start again
327+
await startCandidate(page);
328+
await expect.poll(() => isLeader(page), { timeout: 5000 }).toBe(true);
329+
330+
// New writes work after restart
331+
await writeFile(page, "/restart-test-2.txt", "after-restart");
332+
await expect.poll(() => readFile(page, "/restart-test-2.txt"), { timeout: 5000 }).toBe("after-restart");
333+
334+
// Can shutdown again cleanly
335+
await shutdownLeader(page);
336+
await expect.poll(() => isLeader(page), { timeout: 5000 }).toBe(false);
337+
});
338+
339+
test("concurrent writes from multiple tabs with overlapping JSONRPC IDs", async ({ browser }) => {
340+
const context = await browser.newContext();
341+
const [a, b, c] = await Promise.all([context.newPage(), context.newPage(), context.newPage()]);
342+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
343+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
344+
345+
[a, b, c].forEach(setupPage);
346+
await Promise.all([a.goto(url(ns, arena)), b.goto(url(ns, arena)), c.goto(url(ns, arena))]);
347+
await Promise.all([waitForReady(a), waitForReady(b), waitForReady(c)]);
348+
await Promise.all([startCandidate(a), startCandidate(b), startCandidate(c)]);
349+
350+
await expect
351+
.poll(async () => ((await isLeader(a)) ? 1 : 0) + ((await isLeader(b)) ? 1 : 0) + ((await isLeader(c)) ? 1 : 0), { timeout: 5000 })
352+
.toBe(1);
353+
354+
// Fire concurrent writes from all three tabs simultaneously.
355+
// Each tab's vscode-jsonrpc connection assigns IDs starting from 0,
356+
// so the broker's ID rewriting must disambiguate them.
357+
await Promise.all([
358+
writeFile(a, "/c1.txt", "from-a"),
359+
writeFile(b, "/c2.txt", "from-b"),
360+
writeFile(c, "/c3.txt", "from-c"),
361+
]);
362+
363+
// All files should be readable from any tab
364+
await expect.poll(() => readFile(b, "/c1.txt"), { timeout: 5000 }).toBe("from-a");
365+
await expect.poll(() => readFile(c, "/c2.txt"), { timeout: 5000 }).toBe("from-b");
366+
await expect.poll(() => readFile(a, "/c3.txt"), { timeout: 5000 }).toBe("from-c");
367+
368+
await context.close();
369+
});
370+
371+
test("multiple sequential failovers across three tabs", async ({ browser }) => {
372+
const context = await browser.newContext();
373+
const [a, b, c] = await Promise.all([context.newPage(), context.newPage(), context.newPage()]);
374+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
375+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
376+
377+
[a, b, c].forEach(setupPage);
378+
await Promise.all([a.goto(url(ns, arena)), b.goto(url(ns, arena)), c.goto(url(ns, arena))]);
379+
await Promise.all([waitForReady(a), waitForReady(b), waitForReady(c)]);
380+
await Promise.all([startCandidate(a), startCandidate(b), startCandidate(c)]);
381+
382+
const pages = [a, b, c];
383+
384+
async function findLeaderIdx() {
385+
const states = await Promise.all(pages.map((p) => isLeader(p).catch(() => false)));
386+
return states.findIndex((x) => x);
387+
}
388+
389+
// Wait for initial leader
390+
let leaderIdx = -1;
391+
await expect
392+
.poll(async () => {
393+
leaderIdx = await findLeaderIdx();
394+
return leaderIdx >= 0;
395+
}, { timeout: 5000 })
396+
.toBe(true);
397+
398+
// Write from first leader
399+
await writeFile(pages[leaderIdx], "/failover-1.txt", "leader-1");
400+
401+
// --- First failover: crash the leader ---
402+
const firstLeaderIdx = leaderIdx;
403+
await pages[firstLeaderIdx].close();
404+
405+
const remaining = [0, 1, 2].filter((i) => i !== firstLeaderIdx);
406+
407+
// Wait for a new leader among the remaining tabs
408+
await expect
409+
.poll(async () => {
410+
const states = await Promise.all(remaining.map((i) => isLeader(pages[i]).catch(() => false)));
411+
return states.some((x) => x);
412+
}, { timeout: 10000 })
413+
.toBe(true);
414+
415+
// Find new leader
416+
let secondLeaderIdx = -1;
417+
for (const i of remaining) {
418+
if (await isLeader(pages[i]).catch(() => false)) {
419+
secondLeaderIdx = i;
420+
break;
421+
}
422+
}
423+
424+
// Write from second leader and verify cross-tab read works
425+
const lastIdx = remaining.find((i) => i !== secondLeaderIdx)!;
426+
await writeFile(pages[secondLeaderIdx], "/failover-2.txt", "leader-2");
427+
await expect.poll(() => readFile(pages[lastIdx], "/failover-2.txt"), { timeout: 5000 }).toBe("leader-2");
428+
429+
// --- Second failover: crash the second leader ---
430+
await pages[secondLeaderIdx].close();
431+
432+
// Last page standing should become leader
433+
await expect.poll(() => isLeader(pages[lastIdx]), { timeout: 10000 }).toBe(true);
434+
435+
// RPCs still work after two consecutive failovers
436+
await writeFile(pages[lastIdx], "/failover-3.txt", "leader-3");
437+
await expect.poll(() => readFile(pages[lastIdx], "/failover-3.txt"), { timeout: 5000 }).toBe("leader-3");
438+
439+
await context.close();
440+
});
441+
442+
test("RPCs sent during failover transition eventually resolve", async ({ browser }) => {
443+
const context = await browser.newContext();
444+
const [a, b, c] = await Promise.all([context.newPage(), context.newPage(), context.newPage()]);
445+
const ns = `ns-${Date.now()}-${Math.random().toString(36).slice(2)}`;
446+
const arena = `arena-${Math.random().toString(36).slice(2)}`;
447+
448+
// Use a short broker timeout so failed RPCs don't block the test for 20s
449+
[a, b, c].forEach(setupPage);
450+
await Promise.all([
451+
a.goto(url(ns, arena, { timeout: 2000 })),
452+
b.goto(url(ns, arena, { timeout: 2000 })),
453+
c.goto(url(ns, arena, { timeout: 2000 })),
454+
]);
455+
await Promise.all([waitForReady(a), waitForReady(b), waitForReady(c)]);
456+
await Promise.all([startCandidate(a), startCandidate(b), startCandidate(c)]);
457+
458+
const pages = [a, b, c];
459+
460+
async function findLeaderIdx() {
461+
const states = await Promise.all(pages.map((p) => isLeader(p).catch(() => false)));
462+
return states.findIndex((x) => x);
463+
}
464+
465+
let leaderIdx = -1;
466+
await expect
467+
.poll(async () => {
468+
leaderIdx = await findLeaderIdx();
469+
return leaderIdx >= 0;
470+
}, { timeout: 5000 })
471+
.toBe(true);
472+
473+
// Identify the two survivors
474+
const survivors = [0, 1, 2].filter((i) => i !== leaderIdx).map((i) => pages[i]);
475+
476+
// Crash the leader
477+
await pages[leaderIdx].close();
478+
479+
// Immediately fire RPCs from both survivors — the first attempt may time out
480+
// if the broadcast arrives before the new leader is ready, so we retry once.
481+
async function writeWithRetry(page: Page, path: string, content: string) {
482+
try {
483+
await writeFile(page, path, content);
484+
} catch {
485+
// First attempt failed (likely timeout during transition). Retry after
486+
// giving the new leader time to finish booting.
487+
await page.waitForTimeout(500);
488+
await writeFile(page, path, content);
489+
}
490+
}
491+
492+
await Promise.all([
493+
writeWithRetry(survivors[0], "/race-1.txt", "race-1"),
494+
writeWithRetry(survivors[1], "/race-2.txt", "race-2"),
495+
]);
496+
497+
// Both writes should be readable from either survivor
498+
await expect.poll(() => readFile(survivors[0], "/race-2.txt"), { timeout: 5000 }).toBe("race-2");
499+
await expect.poll(() => readFile(survivors[1], "/race-1.txt"), { timeout: 5000 }).toBe("race-1");
500+
501+
await context.close();
502+
});

0 commit comments

Comments (0)