
Commit 1dce202

test(server): try to reduce number to avoid CI crashing

1 parent 2d5754c

File tree

1 file changed (+12 -12 lines)


apps/server/src/services/llm/providers/integration/streaming.spec.ts (12 additions, 12 deletions)

The one substantive change lowers the memory-management test's chunk count from 1000 to 500; every other hunk is a whitespace-only cleanup, so its removed and added lines render identically below.
@@ -70,7 +70,7 @@ describe('Provider Streaming Integration Tests', () => {
             expect(result.completeText).toBe('Hello world!');
             expect(result.chunkCount).toBe(4);
             expect(receivedChunks.length).toBeGreaterThan(0);
-
+
             // Verify callback received content chunks
             const contentChunks = receivedChunks.filter(c => c.text);
             expect(contentChunks.length).toBe(3);

@@ -101,10 +101,10 @@ describe('Provider Streaming Integration Tests', () => {
                     choices: [{ delta: { content: 'The answer is 4' } }],
                     model: 'gpt-3.5-turbo'
                 },
-                {
+                {
                     choices: [{ finish_reason: 'stop' }],
                     model: 'gpt-3.5-turbo',
-                    done: true
+                    done: true
                 }
             ];

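For readers unfamiliar with the fixture shape above: text arrives on `delta` chunks and the stream terminates with a `finish_reason: 'stop'` chunk. A minimal sketch of the usual accumulation step, with hypothetical names (`OpenAIStyleChunk`, `accumulateText`) that are not taken from the repo:

```ts
// Hypothetical standalone sketch (not the repo's code) of folding
// OpenAI-style delta chunks into complete text.
interface OpenAIStyleChunk {
    choices: Array<{ delta?: { content?: string }; finish_reason?: string }>;
    model: string;
    done?: boolean;
}

function accumulateText(chunks: OpenAIStyleChunk[]): string {
    let text = '';
    for (const chunk of chunks) {
        // Content rides on `delta`; the terminal chunk carries only `finish_reason`.
        text += chunk.choices[0]?.delta?.content ?? '';
    }
    return text;
}
```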
@@ -174,7 +174,7 @@ describe('Provider Streaming Integration Tests', () => {

             expect(result.completeText).toBe('The weather today is sunny.');
             expect(result.chunkCount).toBe(4);
-
+
             // Verify final chunk has usage stats
             expect(result.finalChunk.prompt_eval_count).toBe(15);
             expect(result.finalChunk.eval_count).toBe(8);

@@ -319,7 +319,7 @@ describe('Provider Streaming Integration Tests', () => {
             );

             expect(result.completeText).toBe('Based on my analysis, the answer is 42.');
-
+
             // Verify thinking states were captured
             const thinkingChunks = receivedChunks.filter(c => c.chunk?.message?.thinking);
             expect(thinkingChunks.length).toBe(2);

@@ -332,7 +332,7 @@ describe('Provider Streaming Integration Tests', () => {
                 async *[Symbol.asyncIterator]() {
                     yield { message: { content: 'Starting...' } };
                     // Simulate timeout
-                    await new Promise((_, reject) =>
+                    await new Promise((_, reject) =>
                         setTimeout(() => reject(new Error('Request timeout')), 100)
                     );
                 }

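The timeout mock in this hunk relies on an async generator that rejects after its first yield, so the failure surfaces inside the consumer's `for await` loop. A minimal self-contained sketch of the same pattern, assuming nothing from the repo (the `consume` helper is hypothetical):

```ts
// An async iterable that emits one chunk, then rejects to simulate a hung stream.
const timeoutIterator: AsyncIterable<{ message: { content: string } }> = {
    async *[Symbol.asyncIterator]() {
        yield { message: { content: 'Starting...' } };
        await new Promise<never>((_, reject) =>
            setTimeout(() => reject(new Error('Request timeout')), 100)
        );
    }
};

// Hypothetical consumer: the rejection arrives as an ordinary thrown error
// inside for-await, which is what a test can assert against.
async function consume(): Promise<void> {
    try {
        for await (const chunk of timeoutIterator) {
            console.log(chunk.message.content);
        }
    } catch (err) {
        console.error('stream failed:', (err as Error).message);
    }
}

consume();
```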
@@ -481,15 +481,15 @@ describe('Provider Streaming Integration Tests', () => {
     describe('Memory Management', () => {
         it('should not leak memory during long streaming sessions', async () => {
             // Reduced chunk count for CI stability - still tests memory management
-            const chunkCount = 1000; // Reduced from 10000
+            const chunkCount = 500; // Reduced from 10000
             const longSessionIterator = {
                 async *[Symbol.asyncIterator]() {
                     for (let i = 0; i < chunkCount; i++) {
                         yield {
                             message: { content: `Chunk ${i} with some additional content to increase memory usage` },
                             done: i === (chunkCount - 1)
                         };
-
+
                         // Periodic yield to event loop to prevent blocking
                         if (i % 50 === 0) { // More frequent yields for shorter test
                             await new Promise(resolve => setImmediate(resolve));

@@ -499,16 +499,16 @@ describe('Provider Streaming Integration Tests', () => {
             };

             const initialMemory = process.memoryUsage();
-
+
             const result = await processProviderStream(
                 longSessionIterator,
                 mockProviderOptions
             );

             const finalMemory = process.memoryUsage();
-
+
             expect(result.chunkCount).toBe(chunkCount);
-
+
             // Memory increase should be reasonable (less than 20MB for smaller test)
             const memoryIncrease = finalMemory.heapUsed - initialMemory.heapUsed;
             expect(memoryIncrease).toBeLessThan(20 * 1024 * 1024);

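The test being tuned follows a simple pattern: snapshot `heapUsed`, drain a long synthetic stream, snapshot again, and bound the growth. A standalone sketch under the same assumptions, where the hypothetical `drain` stands in for the repo's `processProviderStream`:

```ts
// Hypothetical stand-in for the real stream processor: just drains the
// iterable and counts chunks.
async function drain(iterable: AsyncIterable<unknown>): Promise<number> {
    let count = 0;
    for await (const _chunk of iterable) count++;
    return count;
}

async function main(): Promise<void> {
    const chunkCount = 500;
    const stream = {
        async *[Symbol.asyncIterator]() {
            for (let i = 0; i < chunkCount; i++) {
                yield { message: { content: `Chunk ${i}` }, done: i === chunkCount - 1 };
                // Yield to the event loop periodically so a long loop can't starve CI.
                if (i % 50 === 0) {
                    await new Promise(resolve => setImmediate(resolve));
                }
            }
        }
    };

    const before = process.memoryUsage().heapUsed;
    const processed = await drain(stream);
    const after = process.memoryUsage().heapUsed;

    // The real test bounds growth at 20 MB; here we just report it.
    console.log(`processed=${processed} chunks, heap growth=${after - before} bytes`);
}

main();
```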
@@ -581,4 +581,4 @@ describe('Provider Streaming Integration Tests', () => {
             )).rejects.toThrow('Invalid stream iterator');
         });
     });
-});
+});
