import { MongoLogManager, mongoLogId } from '.';
import { ObjectId } from 'bson';
import { once } from 'events';
+import type { Stats, Dir } from 'fs';
import { promises as fs } from 'fs';
import path from 'path';
import os from 'os';
@@ -27,6 +28,7 @@ describe('MongoLogManager', function () {
  });
  afterEach(async function () {
    await fs.rmdir(directory, { recursive: true });
+    sinon.restore();
  });

  it('allows creating and writing to log files', async function () {
@@ -86,6 +88,19 @@ describe('MongoLogManager', function () {
    }
  });
 
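+  // Maps each path to '1' if fs.stat succeeds or '0' if it fails, joined into
+  // a single string so tests can assert which files still exist on disk.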
+  const getFilesState = async (paths: string[]) => {
+    return (
+      await Promise.all(
+        paths.map((path) =>
+          fs.stat(path).then(
+            () => 1,
+            () => 0
+          )
+        )
+      )
+    ).join('');
+  };
+
  it('cleans up least recent log files when requested', async function () {
    const manager = new MongoLogManager({
      directory,
@@ -106,21 +121,256 @@ describe('MongoLogManager', function () {
      paths.unshift(filename);
    }

-    const getFiles = async () => {
-      return (
-        await Promise.all(
-          paths.map((path) =>
-            fs.stat(path).then(
-              () => 1,
-              () => 0
-            )
-          )
-        )
-      ).join('');
-    };
-    expect(await getFiles()).to.equal('1111111111');
+    expect(await getFilesState(paths)).to.equal('1111111111');
    await manager.cleanupOldLogFiles();
-    expect(await getFiles()).to.equal('0000011111');
+    expect(await getFilesState(paths)).to.equal('0000011111');
+  });
+
+  it('if fs.stat fails, it errors and is not considered towards the logs limit', async function () {
+    const manager = new MongoLogManager({
+      directory,
+      retentionDays,
+      retentionGB: 3,
+      onwarn,
+      onerror,
+    });
+
+    const offset = Math.floor(Date.now() / 1000);
+
+    const faultyFile = path.join(
+      directory,
+      ObjectId.createFromTime(offset - 10).toHexString() + '_log'
+    );
+    await fs.writeFile(faultyFile, '');
+
+    const faultyFileError = new Error('test error');
+
+    const validFiles: string[] = [];
+    // Create 6 valid files.
+    for (let i = 5; i >= 0; i--) {
+      const filename = path.join(
+        directory,
+        ObjectId.createFromTime(offset - i).toHexString() + '_log'
+      );
+      await fs.writeFile(filename, '');
+      validFiles.push(filename);
+    }
+
+    expect(onerror).not.called;
+
+    const fsStatStub = sinon.stub(fs, 'stat');
+
+    fsStatStub.resolves({
+      size: 1024 * 1024 * 1024,
+    } as Stats);
+    fsStatStub.withArgs(faultyFile).rejects(faultyFileError);
+
+    await manager.cleanupOldLogFiles();
+
+    expect(onerror).calledOnceWithExactly(faultyFileError, faultyFile);
+
+    // fs.stat is stubbed, so getFilesState would not be accurate here.
+    const leftoverFiles = (await fs.readdir(directory))
+      .sort()
+      .map((file) => path.join(directory, file));
+
+    expect(leftoverFiles).to.have.lengthOf(4);
+    expect(leftoverFiles).deep.equals([faultyFile, ...validFiles.slice(3)]);
+  });
+
+  it('cleans up least recent log files when over a storage limit', async function () {
+    const manager = new MongoLogManager({
+      directory,
+      retentionDays,
+      maxLogFileCount: 1000,
+      // 6 KB
+      retentionGB: 6 / 1024 / 1024,
+      onwarn,
+      onerror,
+    });
+
+    const paths: string[] = [];
+    const offset = Math.floor(Date.now() / 1000);
+
+    // Create 10 files of 1 KB each.
+    for (let i = 0; i < 10; i++) {
+      const filename = path.join(
+        directory,
+        ObjectId.createFromTime(offset - i).toHexString() + '_log'
+      );
+      await fs.writeFile(filename, '0'.repeat(1024));
+      paths.unshift(filename);
+    }
+
+    expect(await getFilesState(paths)).to.equal('1111111111');
+    await manager.cleanupOldLogFiles();
+    expect(await getFilesState(paths)).to.equal('0000111111');
+  });
+
+  describe('with a random file order', function () {
+    let paths: string[] = [];
+    const times = [92, 90, 1, 2, 3, 91];
+
+    beforeEach(async function () {
+      const fileNames: string[] = [];
+      paths = [];
+      const offset = Math.floor(Date.now() / 1000);
+
+      for (const time of times) {
+        const fileName =
+          ObjectId.createFromTime(offset - time).toHexString() + '_log';
+        const fullPath = path.join(directory, fileName);
+        await fs.writeFile(fullPath, '0'.repeat(1024));
+        fileNames.push(fileName);
+        paths.push(fullPath);
+      }
+
+      sinon.replace(fs, 'opendir', async () =>
+        Promise.resolve({
+          [Symbol.asyncIterator]: function* () {
+            for (const fileName of fileNames) {
+              yield {
+                name: fileName,
+                isFile: () => true,
+              };
+            }
+          },
+        } as unknown as Dir)
+      );
+    });
+
+    it('cleans up in the expected order with maxLogFileCount', async function () {
+      const manager = new MongoLogManager({
+        directory,
+        retentionDays,
+        maxLogFileCount: 3,
+        onwarn,
+        onerror,
+      });
+
+      expect(await getFilesState(paths)).to.equal('111111');
+
+      await manager.cleanupOldLogFiles();
+
+      expect(await getFilesState(paths)).to.equal('001110');
+    });
+
+    it('cleans up in the expected order with retentionGB', async function () {
+      const manager = new MongoLogManager({
+        directory,
+        retentionDays,
+        retentionGB: 3 / 1024 / 1024,
+        onwarn,
+        onerror,
+      });
+
+      expect(await getFilesState(paths)).to.equal('111111');
+
+      await manager.cleanupOldLogFiles();
+
+      expect(await getFilesState(paths)).to.equal('001110');
+    });
+  });
+
+  describe('with multiple log retention settings', function () {
+    it('with retention days, file count, and max size maintains all conditions', async function () {
+      const manager = new MongoLogManager({
+        directory,
+        retentionDays: 1,
+        maxLogFileCount: 3,
+        retentionGB: 2 / 1024 / 1024,
+        onwarn,
+        onerror,
+      });
+
+      const paths: string[] = [];
+
+      // Create 4 files that are all older than 1 day and 4 that are from today.
+      for (let i = 0; i < 4; i++) {
+        const today = Math.floor(Date.now() / 1000);
+        const yesterday = today - 25 * 60 * 60;
+        const todayFile = path.join(
+          directory,
+          ObjectId.createFromTime(today - i).toHexString() + '_log'
+        );
+        await fs.writeFile(todayFile, '0'.repeat(1024));
+
+        const yesterdayFile = path.join(
+          directory,
+          ObjectId.createFromTime(yesterday - i).toHexString() + '_log'
+        );
+        await fs.writeFile(yesterdayFile, '0'.repeat(1024));
+
+        paths.unshift(todayFile);
+        paths.unshift(yesterdayFile);
+      }
+
+      expect(await getFilesState(paths)).to.equal('11111111');
+
+      await manager.cleanupOldLogFiles();
+
+      // All of yesterday's files and 2 of today's files should be deleted
+      // (because of the file count and file size limits).
+      expect(await getFilesState(paths)).to.equal('00000101');
+    });
+
+    it('with low GB but high file count maintains both conditions', async function () {
+      const manager = new MongoLogManager({
+        directory,
+        retentionDays,
+        maxLogFileCount: 3,
+        // 2 KB, so 2 files
+        retentionGB: 2 / 1024 / 1024,
+        onwarn,
+        onerror,
+      });
+
+      const paths: string[] = [];
+      const offset = Math.floor(Date.now() / 1000);
+
+      // Create 10 files of 1 KB each.
+      for (let i = 0; i < 10; i++) {
+        const filename = path.join(
+          directory,
+          ObjectId.createFromTime(offset - i).toHexString() + '_log'
+        );
+        await fs.writeFile(filename, '0'.repeat(1024));
+        paths.unshift(filename);
+      }
+
+      expect(await getFilesState(paths)).to.equal('1111111111');
+      await manager.cleanupOldLogFiles();
+      expect(await getFilesState(paths)).to.equal('0000000011');
+    });
+
+    it('with high GB but low file count maintains both conditions', async function () {
+      const manager = new MongoLogManager({
+        directory,
+        retentionDays,
+        maxLogFileCount: 2,
+        // 3 KB, so 3 files
+        retentionGB: 3 / 1024 / 1024,
+        onwarn,
+        onerror,
+      });
+
+      const paths: string[] = [];
+      const offset = Math.floor(Date.now() / 1000);
+
+      // Create 10 files of 1 KB each.
+      for (let i = 0; i < 10; i++) {
+        const filename = path.join(
+          directory,
+          ObjectId.createFromTime(offset - i).toHexString() + '_log'
+        );
+        await fs.writeFile(filename, '0'.repeat(1024));
+        paths.unshift(filename);
+      }
+
+      expect(await getFilesState(paths)).to.equal('1111111111');
+      await manager.cleanupOldLogFiles();
+      expect(await getFilesState(paths)).to.equal('0000000011');
+    });
  });

  it('cleaning up old log files is a no-op by default', async function () {