@@ -17,26 +17,19 @@ async function* fileContentStreamer(
   localFilePath: string,
   chunkSize: number = DEFAULT_CHUNK_SIZE,
 ): AsyncGenerator<Buffer, void, void> {
-  let fd: fs.promises.FileHandle | undefined;
+  const fd = await fs.promises.open(localFilePath, 'r');
   try {
-    // Open the file for reading.
-    fd = await fs.promises.open(localFilePath, 'r');
     const buffer = Buffer.alloc(chunkSize);
     while (true) {
-      // Read a chunk from the file into our buffer.
       const { bytesRead } = await fd.read(buffer, 0, chunkSize, null);
       if (bytesRead === 0) {
-        // No more bytes to read, end of file.
         break;
       }
-      // Yield only the portion of the buffer that contains actual data.
       yield buffer.subarray(0, bytesRead);
     }
   } finally {
-    // Crucially, ensure the file handle is closed, even if errors occur.
-    if (fd) {
-      await fd.close();
-    }
+    await fd.close();
+    console.log(` -> Closed read handle for: ${path.basename(localFilePath)}`);
   }
 }
 
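For context, a minimal sketch of how the refactored generator might be consumed; the import path and helper name here are hypothetical, not part of this change. Because the generator reuses a single allocation and yields subarray views into it, a chunk is only valid until the next iteration, so a consumer that retains chunks should copy them first.

// Hypothetical consumer; the module path for fileContentStreamer is assumed.
import { fileContentStreamer } from './fileContentStreamer';

async function countBytes(localFilePath: string): Promise<number> {
  let total = 0;
  // Each chunk is a view into the generator's reused buffer; copy it
  // (e.g. Buffer.from(chunk)) if it must outlive this iteration.
  for await (const chunk of fileContentStreamer(localFilePath)) {
    total += chunk.length;
  }
  return total;
}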
@@ -104,7 +97,6 @@ async function* streamDirectoryAsTar(
   // to the VirtualTarGenerator instance.
   async function walkAndTar(currentFsPath: string, currentArchivePath: string) {
     const entries = await fs.promises.readdir(currentFsPath, { withFileTypes: true });
-    // Using Promise.all to handle entries in parallel, which can be more efficient.
     await Promise.all(
       entries.map(async (entry) => {
         const fullFsPath = path.join(currentFsPath, entry.name);
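As a rough usage sketch, assuming streamDirectoryAsTar(dirPath) yields Buffer chunks (its visible signature suggests this but does not confirm it), the generator can be wrapped with Readable.from and piped to disk; pipeline then provides backpressure by pulling from the generator only as fast as the write stream drains.

// Hypothetical wiring; streamDirectoryAsTar is assumed to be in scope
// and to yield Buffer chunks of the tar stream.
import { createWriteStream } from 'node:fs';
import { Readable } from 'node:stream';
import { pipeline } from 'node:stream/promises';

async function archiveDirectory(dirPath: string, outPath: string): Promise<void> {
  // Readable.from accepts any async iterable, including async generators.
  await pipeline(
    Readable.from(streamDirectoryAsTar(dirPath)),
    createWriteStream(outPath),
  );
}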