@@ -292,6 +292,59 @@ ChunkStats ResultSetRegistry::getChunkStats(int table_id,
292
292
return frag.meta .at (columnId (col_idx))->chunkStats ();
293
293
}
294
294
295
// Return table-wide statistics for |table_id|, computing and caching them on
// first use. Thread-safe: uses the registry lock, the per-table lock, and
// per-fragment locks; note the method is const but fills lazily-computed
// state (fragment metadata, cached table stats), which presumably lives in
// mutable members — TODO confirm.
TableStats ResultSetRegistry::getTableStats(int table_id) const {
  mapd_shared_lock<mapd_shared_mutex> data_lock(data_mutex_);
  CHECK_EQ(tables_.count(table_id), (size_t)1);
  auto& table = *tables_.at(table_id);
  // Acquire the table lock before dropping the registry-wide lock —
  // lock handoff so the table entry stays pinned while we work on it.
  mapd_shared_lock<mapd_shared_mutex> table_lock(table.mutex);
  data_lock.unlock();

  // Fast path: stats were already computed and cached.
  if (!table.table_stats.empty()) {
    return table.table_stats;
  }

  // Ensure every fragment has materialized chunk metadata before aggregating.
  for (auto& frag : table.fragments) {
    mapd_shared_lock<mapd_shared_mutex> frag_read_lock(*frag.mutex);
    if (frag.meta.empty()) {
      // Upgrade read -> write lock; re-check after reacquiring because
      // another thread may have filled the metadata in the unlocked window
      // (double-checked locking on frag.meta).
      frag_read_lock.unlock();
      mapd_unique_lock<mapd_shared_mutex> frag_write_lock(*frag.mutex);
      if (frag.meta.empty()) {
        frag.meta = synthesizeMetadata(frag.rs.get());
      }
    }
  }

  // Aggregate per-fragment stats while still holding the table read lock
  // (buildTableStatsNoLock's documented precondition).
  auto table_stats = buildTableStatsNoLock(table_id);
  // Publish under the write lock. Another thread may have published first;
  // if so, keep its copy and return our locally computed (equivalent) result.
  table_lock.unlock();
  mapd_unique_lock<mapd_shared_mutex> table_write_lock(table.mutex);
  if (table.table_stats.empty()) {
    table.table_stats = table_stats;
  }
  return table_stats;
}
326
+ TableStats ResultSetRegistry::buildTableStatsNoLock (int table_id) const {
327
+ // This method is only called when all fragments have computed metadata
328
+ // and table is read-locked.
329
+ CHECK (tables_.count (table_id));
330
+ auto & table = *tables_.at (table_id);
331
+ TableStats table_stats;
332
+ {
333
+ auto & first_frag = table.fragments .front ();
334
+ mapd_shared_lock<mapd_shared_mutex> frag_lock (*first_frag.mutex );
335
+ for (auto & pr : first_frag.meta ) {
336
+ table_stats.emplace (pr.first , pr.second ->chunkStats ());
337
+ }
338
+ }
339
+ for (size_t frag_idx = 1 ; frag_idx < table.fragments .size (); ++frag_idx) {
340
+ mapd_shared_lock<mapd_shared_mutex> frag_lock (*table.fragments [frag_idx].mutex );
341
+ for (auto & pr : table.fragments [frag_idx].meta ) {
342
+ mergeStats (table_stats.at (pr.first ), pr.second ->chunkStats (), pr.second ->type ());
343
+ }
344
+ }
345
+ return table_stats;
346
+ }
347
+
295
348
void ResultSetRegistry::fetchBuffer (const ChunkKey& key,
296
349
Data_Namespace::AbstractBuffer* dest,
297
350
const size_t num_bytes) {
@@ -386,6 +439,7 @@ TableFragmentsInfo ResultSetRegistry::getTableMetadata(int db_id, int table_id)
386
439
387
440
TableFragmentsInfo res;
388
441
res.setPhysicalNumTuples (table.row_count );
442
+ bool has_lazy_stats = false ;
389
443
for (size_t frag_idx = 0 ; frag_idx < table.fragments .size (); ++frag_idx) {
390
444
auto & frag = table.fragments [frag_idx];
391
445
auto & frag_info = res.fragments .emplace_back ();
@@ -416,13 +470,34 @@ TableFragmentsInfo ResultSetRegistry::getTableMetadata(int db_id, int table_id)
416
470
stats = this ->getChunkStats (table_id, frag_idx, col_idx);
417
471
});
418
472
frag_info.setChunkMetadata (columnId (col_idx), meta);
473
+ has_lazy_stats = true ;
419
474
}
420
475
}
421
476
} else {
422
477
frag_info.setChunkMetadataMap (frag.meta );
423
478
}
424
479
}
425
480
481
+ if (table.table_stats .empty ()) {
482
+ if (has_lazy_stats) {
483
+ res.setTableStatsMaterializeFn (
484
+ [this , table_id](TableStats& stats) { stats = this ->getTableStats (table_id); });
485
+ } else {
486
+ // We can get here if all stats were materialized in the loop above.
487
+ // In this case, build and assign table stats.
488
+ TableStats table_stats = buildTableStatsNoLock (table_id);
489
+ res.setTableStats (table_stats);
490
+
491
+ table_lock.unlock ();
492
+ mapd_unique_lock<mapd_shared_mutex> table_write_lock (table.mutex );
493
+ if (table.table_stats .empty ()) {
494
+ table.table_stats = std::move (table_stats);
495
+ }
496
+ }
497
+ } else {
498
+ res.setTableStats (table.table_stats );
499
+ }
500
+
426
501
return res;
427
502
}
428
503
0 commit comments