@@ -224,7 +224,7 @@ function generateCsvReadSql(options: CsvReadOptions): string {
   // Add debug_node column for multi-node tables (per-node data like node_queries)
   // Skip debug_node for single-file tables (cluster-wide data like cluster_settings)
   const selectClause = nodeId !== undefined
-    ? `SELECT ${nodeId} AS debug_node, * FROM read_csv('${fileName}', ${csvParams})`
+    ? `SELECT CAST(${nodeId} AS INTEGER) AS debug_node, * FROM read_csv('${fileName}', ${csvParams})`
     : `SELECT * FROM read_csv('${fileName}', ${csvParams})`;

   if (operation === 'create') {
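Aside, not part of the change itself: a minimal sketch of the SQL the two `selectClause` branches produce. The identifiers (`nodeId`, `fileName`, `csvParams`) come from the diff; the concrete values below are invented for illustration.

```ts
// Illustrative only: exercising the selectClause logic with made-up values.
const nodeId: number | undefined = 3;               // hypothetical node id
const fileName = "node_queries.txt";                // hypothetical CSV inside the zip
const csvParams = "header=true, auto_detect=true";  // hypothetical read_csv options

const selectClause = nodeId !== undefined
  // Multi-node table: tag each row with its node; the CAST pins debug_node to INTEGER.
  ? `SELECT CAST(${nodeId} AS INTEGER) AS debug_node, * FROM read_csv('${fileName}', ${csvParams})`
  // Cluster-wide table from a single file: no debug_node column.
  : `SELECT * FROM read_csv('${fileName}', ${csvParams})`;

console.log(selectClause);
// SELECT CAST(3 AS INTEGER) AS debug_node, * FROM read_csv('node_queries.txt', header=true, auto_detect=true)
```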
@@ -270,7 +270,7 @@ async function loadLargeFileIncrementally(
       FROM information_schema.tables
       WHERE table_name = '${tableName}' AND table_schema = 'main'
     `);
-    tableExists = checkResult.toArray()[0].count > 0;
+    tableExists = Number(checkResult.toArray()[0].count) > 0;
   } else {
     // Drop table if exists (only for non-multi-node tables)
     await conn.query(`DROP TABLE IF EXISTS ${quotedTableName}`);
@@ -482,18 +482,18 @@ async function loadLargeFileIncrementally(
       const countBeforeResult = await conn.query(
         `SELECT COUNT(*) as count FROM ${quotedTableName} WHERE debug_node != ${nodeId}`,
       );
-      const countBefore = countBeforeResult.toArray()[0].count;
+      const countBefore = Number(countBeforeResult.toArray()[0].count);
       const countAfterResult = await conn.query(
         `SELECT COUNT(*) as count FROM ${quotedTableName}`,
       );
-      const countAfter = countAfterResult.toArray()[0].count;
+      const countAfter = Number(countAfterResult.toArray()[0].count);
       finalRowCount = countAfter - countBefore;
     } else {
       // For CREATE operations or single-node tables, get total count
       const countResult = await conn.query(
         `SELECT COUNT(*) as count FROM ${quotedTableName}`,
       );
-      finalRowCount = countResult.toArray()[0].count;
+      finalRowCount = Number(countResult.toArray()[0].count);
     }

     console.log(`✅ Successfully loaded large table with ${finalRowCount} rows`);
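The repeated `Number(...)` wrappers in this hunk (and in the later ones) address the same issue: `COUNT(*)` is a BIGINT in DuckDB, and the WASM client typically surfaces it as a JavaScript BigInt rather than a number. A minimal sketch of why that matters, with stand-in values rather than real query results:

```ts
// Stand-in for countBeforeResult.toArray()[0].count (assumed to arrive as BigInt).
const countBefore: bigint = 120n;
const rowsLoaded = 42; // an ordinary number from elsewhere in the worker

// Mixing the two types fails:
// const total = countBefore + rowsLoaded;   // TypeError: Cannot mix BigInt and other types
// JSON.stringify({ count: countBefore });   // TypeError: Do not know how to serialize a BigInt

// Converting once, at the query boundary, keeps the rest of the worker in plain numbers.
const total = Number(countBefore) + rowsLoaded; // 162
```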
@@ -798,8 +798,24 @@ async function startTableLoading(message: StartTableLoadingMessage) {
 }

 async function loadSingleTableFromMessage(message: LoadSingleTableMessage) {
-  const { table } = message;
-  await loadSingleTable(table);
+  const { table, id } = message;
+  try {
+    await loadSingleTable(table);
+    // Send success response
+    sendResponse(message, {
+      type: "loadSingleTableComplete",
+      id,
+      success: true,
+    });
+  } catch (error) {
+    // Send error response
+    sendResponse(message, {
+      type: "loadSingleTableComplete",
+      id,
+      success: false,
+      error: error instanceof Error ? error.message : "Unknown error",
+    });
+  }
 }

 interface TableInfo {
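The handler now correlates every completion with its originating request via `id` instead of fire-and-forget loading. For context, a sketch of what the requesting side of such a protocol could look like; the message shapes mirror the diff, but `pendingRequests`, `requestLoadSingleTable`, and `onWorkerMessage` are invented names, not code from this PR.

```ts
// Hypothetical caller side of the id-correlated request/response pattern above.
type LoadSingleTableComplete = {
  type: "loadSingleTableComplete";
  id: number;
  success: boolean;
  error?: string;
};

let nextId = 0;
const pendingRequests = new Map<number, (msg: LoadSingleTableComplete) => void>();

function requestLoadSingleTable(worker: Worker, table: unknown): Promise<void> {
  const id = nextId++;
  return new Promise<void>((resolve, reject) => {
    pendingRequests.set(id, (msg) => {
      pendingRequests.delete(id);
      if (msg.success) {
        resolve();
      } else {
        reject(new Error(msg.error ?? "Unknown error"));
      }
    });
    worker.postMessage({ type: "loadSingleTable", table, id });
  });
}

// Route completions back to whichever request they belong to.
function onWorkerMessage(event: MessageEvent) {
  const msg = event.data;
  if (msg?.type === "loadSingleTableComplete") {
    pendingRequests.get(msg.id)?.(msg);
  }
}
```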
@@ -852,20 +868,75 @@ async function loadSingleTable(table: TableInfo) {
     size,
   });

-  // Handle multi-node tables
+  // Handle multi-node tables with sliding window parallel loading
   if (nodeFiles && nodeFiles.length > 0) {
-
     let totalRowCount = 0;
-    for (const nodeFile of nodeFiles) {
-      // Skip error files - don't try to load them into DuckDB
-      if (nodeFile.isError) {
-        continue;
+    let filesProcessed = 0;
+    const totalFiles = nodeFiles.filter(f => !f.isError).length;
+    const validFiles = nodeFiles.filter(f => !f.isError);
+
+    // Sliding window: limit by file count AND byte size
+    const MAX_QUEUED_FILES = 20; // Max files in-flight
+    const MAX_QUEUED_BYTES = 8 * 1024 * 1024; // Max 8MB of compressed data in-flight
+    const pendingPromises: Array<Promise<{ nodeFile: typeof validFiles[0], response: any }>> = [];
+    let nextFileIndex = 0;
+    let queuedBytes = 0;
+
+    // Fill initial window (stop when either limit is reached)
+    while (
+      nextFileIndex < validFiles.length &&
+      pendingPromises.length < MAX_QUEUED_FILES &&
+      queuedBytes < MAX_QUEUED_BYTES
+    ) {
+      const nodeFile = validFiles[nextFileIndex];
+      pendingPromises.push(
+        sendMessageToZipWorker({
+          type: "readFileChunked",
+          path: nodeFile.path,
+        }).then(response => ({ nodeFile, response }))
+      );
+      queuedBytes += nodeFile.size;
+      nextFileIndex++;
+    }
+
+    // Process files as they complete, maintaining the window
+    while (pendingPromises.length > 0) {
+      // Wait for first promise to complete (FIFO order)
+      const { nodeFile, response: fileResponse } = await pendingPromises.shift()!;
+
+      filesProcessed++;
+      queuedBytes -= nodeFile.size; // Remove completed file from queue size
+
+      // Queue more files to maintain thresholds (stop when either limit is reached)
+      while (
+        nextFileIndex < validFiles.length &&
+        pendingPromises.length < MAX_QUEUED_FILES &&
+        queuedBytes < MAX_QUEUED_BYTES
+      ) {
+        const nextFile = validFiles[nextFileIndex];
+        pendingPromises.push(
+          sendMessageToZipWorker({
+            type: "readFileChunked",
+            path: nextFile.path,
+          }).then(response => ({ nodeFile: nextFile, response }))
+        );
+        queuedBytes += nextFile.size;
+        nextFileIndex++;
       }

-      // Request file from zip worker
-      const fileResponse = await sendMessageToZipWorker({
-        type: "readFileChunked",
-        path: nodeFile.path,
+      // Send progress update
+      self.postMessage({
+        type: "tableLoadProgress",
+        tableName,
+        status: "loading",
+        nodeId,
+        originalName,
+        isError,
+        fileProgress: {
+          current: filesProcessed,
+          total: totalFiles,
+          percentage: Math.round((filesProcessed / totalFiles) * 100),
+        },
       });

       if (!fileResponse.success) {
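The loop above is a specific instance of a general sliding-window pattern: keep a bounded number of reads (and a bounded number of bytes) in flight with the zip worker while consuming results strictly in FIFO order. A generic sketch of the same idea follows; all names here are illustrative, not code from the PR.

```ts
// Generic sliding-window sketch: at most maxItems requests and roughly maxBytes
// of payload in flight, results consumed in FIFO order.
async function processWithWindow<T, R>(
  items: T[],
  sizeOf: (item: T) => number,
  fetch: (item: T) => Promise<R>,
  handle: (item: T, result: R) => Promise<void>,
  maxItems = 20,
  maxBytes = 8 * 1024 * 1024,
): Promise<void> {
  const pending: Array<Promise<{ item: T; result: R }>> = [];
  let next = 0;
  let queuedBytes = 0;

  const fill = () => {
    while (next < items.length && pending.length < maxItems && queuedBytes < maxBytes) {
      const item = items[next++];
      queuedBytes += sizeOf(item);
      pending.push(fetch(item).then((result) => ({ item, result })));
    }
  };

  fill();
  while (pending.length > 0) {
    const { item, result } = await pending.shift()!; // FIFO: oldest request first
    queuedBytes -= sizeOf(item);
    fill();                     // top the window back up before doing slow work
    await handle(item, result); // e.g. sequential DuckDB ingestion
  }
}
```

As in the diff, the byte budget is checked before a file is queued and its size added afterwards, so a single file larger than the budget is still admitted when the window is otherwise empty; and the refill happens before the sequential handling step, which is what keeps the zip worker busy while DuckDB ingests the previous file.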
@@ -897,7 +968,7 @@ async function loadSingleTable(table: TableInfo) {
         );
         totalRowCount += rowCount;
       } else {
-        // Load from text
+        // Load from text - DuckDB operations are sequential, but zip worker stays busy
         const rowCount = await loadTableFromText(
           tableName,
           text,
@@ -931,7 +1002,7 @@ async function loadSingleTable(table: TableInfo) {
     const countResult = await conn.query(
       `SELECT COUNT(*) as count FROM ${quotedTableName}`,
     );
-    const rowCount = countResult.toArray()[0].count;
+    const rowCount = Number(countResult.toArray()[0].count);

     self.postMessage({
       type: "tableLoadProgress",
@@ -1053,7 +1124,7 @@ async function loadTableFromText(
     const countResult = await conn.query(
       `SELECT COUNT(*) as count FROM ${quotedTableName}`,
     );
-    return countResult.toArray()[0].count;
+    return Number(countResult.toArray()[0].count);
   }

   try {
@@ -1102,7 +1173,7 @@ async function loadTableFromText(
       FROM information_schema.tables
       WHERE table_name = '${tableName}' AND table_schema = 'main'
     `);
-    tableExists = checkResult.toArray()[0].count > 0;
+    tableExists = Number(checkResult.toArray()[0].count) > 0;
   }

   // Create table from CSV with auto-detection or explicit types
@@ -1232,7 +1303,7 @@ async function loadTableFromText(
     const countResult = await conn.query(
       `SELECT COUNT(*) as count FROM ${quotedTableName}`,
     );
-    count = countResult.toArray()[0].count;
+    count = Number(countResult.toArray()[0].count);
   }

   loadedTables.add(tableName);