@@ -52,7 +52,7 @@ ABSL_FLAG(float, tiered_offload_threshold, 0.5,
 ABSL_FLAG(float, tiered_upload_threshold, 0.1,
           "Ratio of free memory (free/max memory) below which uploading stops");

-ABSL_FLAG(bool, tiered_experimental_hash_offload, false, "Experimental hash datatype offloading");
+ABSL_FLAG(bool, tiered_experimental_hash_support, false, "Experimental hash datatype offloading");

 namespace dfly {

@@ -81,9 +81,12 @@ tiering::DiskSegment FromCoolItem(const PrimeValue::CoolItem& item) {
   return {item.record->page_index * tiering::kPageSize + item.page_offset, item.serialized_size};
 }

+// Determine required byte size and encoding type based on value.
+// TODO(vlad): Maybe split into different accessors?
 // Do NOT enforce rules depending on dynamic runtime values as this is called
 // when scheduling stash and just before succeeding, and is expected to return the same results
-optional<std::pair<size_t, CompactObj::ExternalRep>> EstimateSerializedSize(const PrimeValue& pv) {
+optional<pair<size_t /* size */, CompactObj::ExternalRep>> EstimateSerializedSize(
+    const PrimeValue& pv) {
   switch (pv.ObjType()) {
     case OBJ_STRING:
       return std::make_pair(pv.GetRawString().view().size(), CompactObj::ExternalRep::STRING);
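The comment above implies a two-phase contract: the estimate is taken once when the stash is scheduled and again just before it completes, and both calls must agree. A minimal usage sketch of that contract; the call site and `buffer` are assumed for illustration, not taken from this diff:

    // 1) When scheduling: reserve exactly the estimated number of bytes.
    auto est = EstimateSerializedSize(pv);  // assumed non-nullopt for stashable types
    auto [size, rep] = *est;

    // 2) Just before completion: the estimate must still hold, so Serialize()
    //    fills the reserved buffer exactly.
    size_t written = Serialize(rep, pv, buffer);
    DCHECK_EQ(written, size);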
@@ -111,11 +114,11 @@ size_t Serialize(CompactObj::ExternalRep rep, const PrimeValue& pv, io::MutableB
     case CompactObj::ExternalRep::SERIALIZED_MAP: {
       DCHECK_EQ(pv.Encoding(), kEncodingListPack);

+      // TODO(vlad): Optimize copy for serialization
       detail::ListpackWrap lw{static_cast<uint8_t*>(pv.RObjPtr())};
       vector<pair<string, string>> entries(lw.begin(), lw.end());
-      vector<pair<string_view, string_view>> entries_sv(entries.begin(), entries.end());
       return tiering::SerializedMap::Serialize(
-          entries_sv, {reinterpret_cast<char*>(buffer.data()), buffer.length()});
+          entries, {reinterpret_cast<char*>(buffer.data()), buffer.length()});
     }
   };
   return 0;
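Passing `entries` (a `vector<pair<string, string>>`) directly suggests that `tiering::SerializedMap::Serialize` accepts any range of string-like pairs, which removes the extra `string_view` conversion pass. One plausible shape of such a signature, purely an assumption since the class itself is not shown in this diff:

    // Hypothetical: templating over the entry type accepts both
    // pair<string, string> and pair<string_view, string_view> callers.
    template <typename Entries>
    static size_t Serialize(const Entries& entries, absl::Span<char> dest);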
@@ -218,13 +221,13 @@ class TieredStorage::ShardOpManager : public tiering::OpManager {
       stats->tiered_used_bytes += segment.length;
       stats_.total_stashes++;

+      CompactObj::ExternalRep rep = EstimateSerializedSize(*pv)->second;
       if (ts_->config_.experimental_cooling) {
         RetireColdEntries(pv->MallocUsed());
-        ts_->CoolDown(key.first, key.second, segment, pv);
+        ts_->CoolDown(key.first, key.second, segment, rep, pv);
       } else {
         stats->AddTypeMemoryUsage(pv->ObjType(), -pv->MallocUsed());
-        auto estimation = EstimateSerializedSize(*pv);
-        pv->SetExternal(segment.offset, segment.length, estimation->second);
+        pv->SetExternal(segment.offset, segment.length, rep);
       }
     } else {
       LOG(DFATAL) << "Should not reach here";
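The unchecked `->second` above leans on the estimator's stability guarantee: a value that was scheduled for stashing must still be estimable when the stash completes. A defensive variant, purely illustrative and not part of the patch:

    auto est = EstimateSerializedSize(*pv);
    DCHECK(est) << "stashed value must still have an estimable size";
    CompactObj::ExternalRep rep = est->second;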
@@ -459,7 +462,7 @@ std::optional<util::fb2::Future<bool>> TieredStorage::TryStash(DbIndex dbid, str
   error_code ec;

   value->SetStashPending(true);
-  if (true /* OccupiesWholePages(*estimated) */) {  // large enough for own page
+  if (OccupiesWholePages(estimated->first)) {  // large enough for own page
     id = KeyRef(dbid, key);
     if (auto prepared = op_manager_->PrepareStash(estimated->first); prepared) {
       auto [offset, buf] = *prepared;
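`OccupiesWholePages` itself is not shown in this diff; a plausible definition, assuming it compares the estimated serialized size against the disk allocator's page size so that only large values get dedicated pages while smaller ones go to shared bins:

    // Assumed helper: values spanning at least a page are stashed standalone;
    // anything smaller is batched into small bins instead.
    bool OccupiesWholePages(size_t size) {
      return size >= tiering::kPageSize;
    }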
@@ -560,6 +563,7 @@ TieredStats TieredStorage::GetStats() const {
     stats.small_bins_cnt = bins_stats.stashed_bins_cnt;
     stats.small_bins_entries_cnt = bins_stats.stashed_entries_cnt;
     stats.small_bins_filling_bytes = bins_stats.current_bin_bytes;
+    stats.small_bins_filling_entries_cnt = bins_stats.current_entries_cnt;
   }

   {  // Own stats
@@ -584,14 +588,14 @@ void TieredStorage::UpdateFromFlags() {
       .write_depth_limit = absl::GetFlag(FLAGS_tiered_storage_write_depth),
       .offload_threshold = absl::GetFlag(FLAGS_tiered_offload_threshold),
       .upload_threshold = absl::GetFlag(FLAGS_tiered_upload_threshold),
-      .experimental_hash_offload = absl::GetFlag(FLAGS_tiered_experimental_hash_offload),
+      .experimental_hash_offload = absl::GetFlag(FLAGS_tiered_experimental_hash_support),
   };
 }

 std::vector<std::string> TieredStorage::GetMutableFlagNames() {
   return base::GetFlagNames(FLAGS_tiered_min_value_size, FLAGS_tiered_experimental_cooling,
                             FLAGS_tiered_storage_write_depth, FLAGS_tiered_offload_threshold,
-                            FLAGS_tiered_upload_threshold, FLAGS_tiered_experimental_hash_offload);
+                            FLAGS_tiered_upload_threshold, FLAGS_tiered_experimental_hash_support);
 }

 bool TieredStorage::ShouldOffload() const {
@@ -702,7 +706,8 @@ bool TieredStorage::ShouldStash(const PrimeValue& pv) const {
 }

 void TieredStorage::CoolDown(DbIndex db_ind, std::string_view str,
-                             const tiering::DiskSegment& segment, PrimeValue* pv) {
+                             const tiering::DiskSegment& segment, CompactObj::ExternalRep rep,
+                             PrimeValue* pv) {
   detail::TieredColdRecord* record = CompactObj::AllocateMR<detail::TieredColdRecord>();
   cool_queue_.push_front(*record);
   stats_.cool_memory_used += (sizeof(detail::TieredColdRecord) + pv->MallocUsed());
@@ -712,7 +717,7 @@ void TieredStorage::CoolDown(DbIndex db_ind, std::string_view str,
   record->page_index = segment.offset / tiering::kPageSize;
   record->value = std::move(*pv);

-  pv->SetCool(segment.offset, segment.length, record);
+  pv->SetCool(segment.offset, segment.length, rep, record);
 }

 PrimeValue TieredStorage::Warmup(DbIndex dbid, PrimeValue::CoolItem item) {
@@ -721,10 +726,6 @@ PrimeValue TieredStorage::Warmup(DbIndex dbid, PrimeValue::CoolItem item) {
   // We remove it from both cool storage and the offline storage.
   PrimeValue hot = DeleteCool(item.record);
   op_manager_->DeleteOffloaded(dbid, segment);
-
-  // Bring it back to the PrimeTable.
-  DCHECK(hot.ObjType() == OBJ_STRING);
-
   return hot;
 }
