@@ -485,15 +485,15 @@ int64_t TieredStorage::UploadBudget() const {
 }
 
 void TieredStorage::RunOffloading(DbIndex dbid) {
-  const size_t kMaxIterations = 500;
-
+  using namespace tiering::literals;
   if (SliceSnapshot::IsSnaphotInProgress())
     return;
 
+  const auto start_cycles = base::CycleClock::Now();
+
   // Don't run offloading if there's only very little space left
   auto disk_stats = op_manager_->GetStats().disk_stats;
-  if (disk_stats.allocated_bytes + kMaxIterations / 2 * tiering::kPageSize >
-      disk_stats.max_file_size)
+  if (disk_stats.allocated_bytes + 1_MB > disk_stats.max_file_size)
     return;
 
   string tmp;
@@ -511,14 +511,19 @@ void TieredStorage::RunOffloading(DbIndex dbid) {
 
   PrimeTable& table = op_manager_->db_slice_.GetDBTable(dbid)->prime;
 
-  // Loop while we haven't traversed all entries or reached our stash io device limit.
-  // Keep number of iterations below resonable limit to keep datastore always responsive
-  size_t iterations = 0;
+  // Loop over entries with a time and max stash budget.
+  uint64_t cycles = 0;
   do {
+    offloading_cursor_ = table.TraverseBySegmentOrder(offloading_cursor_, cb);
+
     if (op_manager_->GetStats().pending_stash_cnt >= config_.write_depth_limit)
       break;
-    offloading_cursor_ = table.TraverseBySegmentOrder(offloading_cursor_, cb);
-  } while (offloading_cursor_ && iterations++ < kMaxIterations);
+
+    // TODO: yield as background fiber to perform more work on idle
+    cycles = base::CycleClock::Now() - start_cycles;
+    if (base::CycleClock::ToUsec(cycles) >= 100)
+      break;
+  } while (offloading_cursor_);
 }
 
 size_t TieredStorage::ReclaimMemory(size_t goal) {
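
The headroom check now uses a fixed 1 MiB guard instead of deriving one from kMaxIterations, written with a user-defined literal pulled in via using namespace tiering::literals. Dragonfly's actual definition of that suffix may differ; a minimal sketch of the usual pattern for such a literal:

#include <cstddef>
#include <iostream>

namespace tiering::literals {
// Hypothetical definition; the real operator in Dragonfly may differ in name or return type.
constexpr std::size_t operator""_MB(unsigned long long v) {
  return static_cast<std::size_t>(v) << 20;  // v * 1024 * 1024 bytes
}
}  // namespace tiering::literals

int main() {
  using namespace tiering::literals;
  constexpr std::size_t headroom = 1_MB;
  std::cout << headroom << "\n";  // prints 1048576
}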
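
The rewritten loop bounds work by elapsed time rather than by iteration count: each pass traverses one segment, and the loop exits once roughly 100 microseconds have passed, so the datastore stays responsive even when individual segments are expensive to offload. A self-contained sketch of the same time-budget pattern, using std::chrono in place of base::CycleClock and a dummy per-segment step (both are stand-ins, not the project's API):

#include <chrono>
#include <cstdint>
#include <iostream>

// Stand-in for one TraverseBySegmentOrder() step; returns false once the table is exhausted.
static bool OffloadOneSegment(uint64_t& cursor) {
  ++cursor;                // pretend some entries were stashed
  return cursor < 10'000;  // hypothetical end of the table
}

int main() {
  using Clock = std::chrono::steady_clock;
  constexpr auto kBudget = std::chrono::microseconds(100);  // same 100 us budget as the patch

  const auto start = Clock::now();
  uint64_t cursor = 0;
  bool more = true;
  do {
    more = OffloadOneSegment(cursor);
    // Other early exits (e.g. a pending-stash depth limit) would go here.
    if (Clock::now() - start >= kBudget)
      break;  // time budget exhausted; a later run resumes from the saved cursor
  } while (more);

  std::cout << "segments visited this run: " << cursor << "\n";
  return 0;
}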