
Commit 730e2da

Copilot authored and eliandoran committed
Improve test reliability by using vi.waitFor() for async checks
Replaced hardcoded timeouts with vi.waitFor() for better test reliability.

Co-authored-by: eliandoran <[email protected]>
1 parent 18a1984 commit 730e2da
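
For context, the pattern this commit applies is shown below as a minimal Vitest sketch (illustrative only, not code from the repository): a hardcoded setTimeout delay is replaced with vi.waitFor(), which keeps re-running the assertion callback until it stops throwing or the timeout elapses.

import { describe, expect, it, vi } from "vitest";

describe("waitFor pattern", () => {
    it("retries an assertion until it passes or times out", async () => {
        const spy = vi.fn();
        setTimeout(() => spy("done"), 30); // simulated async work

        // Before: a fixed delay that is both slow and racy.
        // await new Promise(resolve => setTimeout(resolve, 100));

        // After: poll the assertion every 50 ms, failing only after 1 s.
        await vi.waitFor(() => {
            expect(spy).toHaveBeenCalledWith("done");
        }, { timeout: 1000, interval: 50 });
    });
});

Because vi.waitFor() resolves as soon as the callback succeeds, the happy path finishes faster than the old fixed 100 ms wait, while the 1000 ms budget gives slow CI runs extra headroom.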

File tree

1 file changed: +25 -28 lines changed


apps/server/src/routes/api/llm.spec.ts

Lines changed: 25 additions & 28 deletions
@@ -625,19 +625,18 @@ describe("LLM API Tests", () => {
 
             expect(response.status).toBe(200); // Still returns 200
 
-            // Wait for async streaming operations to complete
-            await new Promise(resolve => setTimeout(resolve, 100));
-
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
-            // Verify error message was sent via WebSocket
-            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-                type: 'llm-stream',
-                chatNoteId: testChatId,
-                error: 'Error during streaming: Pipeline error',
-                done: true
-            });
+            // Wait for async streaming operations to complete
+            await vi.waitFor(() => {
+                expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                    type: 'llm-stream',
+                    chatNoteId: testChatId,
+                    error: 'Error during streaming: Pipeline error',
+                    done: true
+                });
+            }, { timeout: 1000, interval: 50 });
         });
 
         it("should handle AI disabled state", async () => {
@@ -656,19 +655,18 @@ describe("LLM API Tests", () => {
 
             expect(response.status).toBe(200);
 
-            // Wait for async streaming operations to complete
-            await new Promise(resolve => setTimeout(resolve, 100));
-
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
-            // Verify error message about AI being disabled
-            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-                type: 'llm-stream',
-                chatNoteId: testChatId,
-                error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
-                done: true
-            });
+            // Wait for async streaming operations to complete
+            await vi.waitFor(() => {
+                expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                    type: 'llm-stream',
+                    chatNoteId: testChatId,
+                    error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
+                    done: true
+                });
+            }, { timeout: 1000, interval: 50 });
         });
 
         it("should save chat messages after streaming completion", async () => {
@@ -758,17 +756,16 @@ describe("LLM API Tests", () => {
 
             expect(response.status).toBe(200);
 
-            // Wait for async streaming operations to complete
-            await new Promise(resolve => setTimeout(resolve, 100));
-
             // Import ws service to access mock
             const ws = (await import("../../services/ws.js")).default;
 
-            // Verify multiple chunks were sent
-            const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
-                call => call[0].type === 'llm-stream' && call[0].content
-            );
-            expect(streamCalls.length).toBeGreaterThan(5);
+            // Wait for async streaming operations to complete and verify multiple chunks were sent
+            await vi.waitFor(() => {
+                const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
+                    call => call[0].type === 'llm-stream' && call[0].content
+                );
+                expect(streamCalls.length).toBeGreaterThan(5);
+            }, { timeout: 1000, interval: 50 });
         });
     });
 
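Note that these assertions depend on the ws service being mocked elsewhere in llm.spec.ts; that setup is outside this diff. A hypothetical sketch of such a module mock, assuming Vitest's vi.mock():

import { vi } from "vitest";

// Hypothetical setup (the actual mock lives elsewhere in llm.spec.ts):
// replace the ws module's default export with a spy object so the tests
// can assert on messages sent to WebSocket clients.
vi.mock("../../services/ws.js", () => ({
    default: {
        sendMessageToAllClients: vi.fn()
    }
}));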