@@ -413,10 +413,12 @@ class Segment {

   // Returns (iterator, true) if insert succeeds,
   // (iterator, false) for duplicate and (invalid-iterator, false) if it's full
-  template <typename K, typename V, typename Pred>
-  std::pair<Iterator, bool> Insert(K&& key, V&& value, Hash_t key_hash, Pred&& pred);
+  template <typename K, typename V, typename Pred, typename OnMoveCb>
+  std::pair<Iterator, bool> Insert(K&& key, V&& value, Hash_t key_hash, Pred&& pred,
+                                   OnMoveCb&& on_move_cb);

-  template <typename HashFn> void Split(HashFn&& hfunc, Segment* dest);
+  template <typename HashFn, typename OnMoveCb>
+  void Split(HashFn&& hfunc, Segment* dest, OnMoveCb&& on_move_cb);

   void Delete(const Iterator& it, Hash_t key_hash);

@@ -523,8 +525,8 @@ class Segment {
   // otherwise chooses home bucket first.
   // TODO: I am actually not sure if spread optimization is helpful. Worth checking
   // whether we get higher occupancy rates when using it.
-  template <typename U, typename V>
-  Iterator InsertUniq(U&& key, V&& value, Hash_t key_hash, bool spread);
+  template <typename U, typename V, typename OnMoveCb>
+  Iterator InsertUniq(U&& key, V&& value, Hash_t key_hash, bool spread, OnMoveCb&& on_move_cb);

   // capture version change in case of insert.
   // Returns ids of buckets whose version would cross ver_threshold upon insertion of key_hash
@@ -558,19 +560,25 @@ class Segment {
   }

   // Bumps up this entry, making it more "important" for the eviction policy.
-  template <typename BumpPolicy>
-  Iterator BumpUp(PhysicalBid bid, SlotId slot, Hash_t key_hash, const BumpPolicy& ev);
+  template <typename BumpPolicy, typename OnMoveCb>
+  Iterator BumpUp(PhysicalBid bid, SlotId slot, Hash_t key_hash, const BumpPolicy& ev,
+                  OnMoveCb&& cb);

   // Tries to move stash entries back to their normal buckets (exact or neighbour).
   // Returns the number of entries that were successfully unloaded.
   // Important! Affects versions of the moved items and the items in the destination
   // buckets.
-  template <typename HFunc> unsigned UnloadStash(HFunc&& hfunc);
+  template <typename HFunc, typename OnMoveCb> unsigned UnloadStash(HFunc&& hfunc, OnMoveCb&& cb);

   unsigned num_buckets() const {
     return kBucketNum + kStashBucketNum;
   }

+  // needed only when DashTable grows its segment table.
+  void set_segment_id(uint32_t new_id) {
+    segment_id_ = new_id;
+  }
+
  private:
   static_assert(sizeof(Iterator) == 2);

@@ -1084,15 +1092,16 @@ auto Segment<Key, Value, Policy>::TryMoveFromStash(unsigned stash_id, unsigned s
 }

 template <typename Key, typename Value, typename Policy>
-template <typename U, typename V, typename Pred>
-auto Segment<Key, Value, Policy>::Insert(U&& key, V&& value, Hash_t key_hash, Pred&& pred)
-    -> std::pair<Iterator, bool> {
+template <typename U, typename V, typename Pred, typename OnMoveCb>
+auto Segment<Key, Value, Policy>::Insert(U&& key, V&& value, Hash_t key_hash, Pred&& pred,
+                                         OnMoveCb&& on_move_cb) -> std::pair<Iterator, bool> {
   Iterator it = FindIt(key_hash, pred);
   if (it.found()) {
     return std::make_pair(it, false); /* duplicate insert */
   }

-  it = InsertUniq(std::forward<U>(key), std::forward<V>(value), key_hash, true);
+  it = InsertUniq(std::forward<U>(key), std::forward<V>(value), key_hash, true,
+                  std::forward<OnMoveCb>(on_move_cb));

   return std::make_pair(it, it.found());
 }
@@ -1210,8 +1219,8 @@ void Segment<Key, Value, Policy>::Delete(const Iterator& it, Hash_t key_hash) {
 // Split items from the left segment to the right during the growth phase.
 // right segment will have all the items with lsb at local_depth == 1.
 template <typename Key, typename Value, typename Policy>
-template <typename HFunc>
-void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
+template <typename HFunc, typename MoveCb>
+void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right, MoveCb&& on_move_cb) {
   ++local_depth_;
   dest_right->local_depth_ = local_depth_;

@@ -1243,8 +1252,11 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {

       invalid_mask |= (1u << slot);

+      // We pass a dummy callback because we are not interested in tracking movements in the
+      // newly created segment.
       Iterator it = dest_right->InsertUniq(std::forward<Key_t>(bucket->key[slot]),
-                                           std::forward<Value_t>(bucket->value[slot]), hash, false);
+                                           std::forward<Value_t>(bucket->value[slot]), hash, false,
+                                           [](auto&&...) {});

       // we move items residing in a regular bucket to a new segment.
       // Note 1: in case we are somehow attacked with items that after the split
@@ -1266,6 +1278,7 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
       // selective bias will be able to hit our dashtable with items with the same bucket id.
       assert(it.found());
       update_version(*bucket, it.index);
+      on_move_cb(segment_id_, i, dest_right->segment_id_, it.index);
     };

     bucket_[i].ForEachSlot(std::move(cb));
@@ -1286,17 +1299,20 @@ void Segment<Key, Value, Policy>::Split(HFunc&& hfn, Segment* dest_right) {
         Iterator it = TryMoveFromStash(i, slot, hash);
         if (it.found()) {
           invalid_mask |= (1u << slot);
+          on_move_cb(segment_id_, i, segment_id_, it.index);
         }

         return;
       }

       invalid_mask |= (1u << slot);
       auto it = dest_right->InsertUniq(std::forward<Key_t>(bucket->key[slot]),
-                                       std::forward<Value_t>(bucket->value[slot]), hash, false);
+                                       std::forward<Value_t>(bucket->value[slot]), hash, false,
+                                       /* not interested in these movements */ [](auto&&...) {});
       (void)it;
       assert(it.index != kNanBid);
       update_version(*bucket, it.index);
+      on_move_cb(segment_id_, i, dest_right->segment_id_, it.index);

       // Remove stash reference pointing to stash bucket i.
       RemoveStashReference(i, hash);
@@ -1348,9 +1364,9 @@ bool Segment<Key, Value, Policy>::CheckIfMovesToOther(bool own_items, unsigned f
 }

 template <typename Key, typename Value, typename Policy>
-template <typename U, typename V>
-auto Segment<Key, Value, Policy>::InsertUniq(U&& key, V&& value, Hash_t key_hash, bool spread)
-    -> Iterator {
+template <typename U, typename V, typename OnMoveCb>
+auto Segment<Key, Value, Policy>::InsertUniq(U&& key, V&& value, Hash_t key_hash, bool spread,
+                                             OnMoveCb&& on_move_cb) -> Iterator {
   const uint8_t bid = HomeIndex(key_hash);
   const uint8_t nid = NextBid(bid);

@@ -1385,13 +1401,15 @@ auto Segment<Key, Value, Policy>::InsertUniq(U&& key, V&& value, Hash_t key_hash
   int displace_index = MoveToOther(true, nid, NextBid(nid));
   if (displace_index >= 0) {
     neighbor.Insert(displace_index, std::forward<U>(key), std::forward<V>(value), meta_hash, true);
+    on_move_cb(segment_id_, nid, NextBid(nid));
     return Iterator{nid, uint8_t(displace_index)};
   }

   unsigned prev_idx = PrevBid(bid);
   displace_index = MoveToOther(false, bid, prev_idx);
   if (displace_index >= 0) {
     target.Insert(displace_index, std::forward<U>(key), std::forward<V>(value), meta_hash, false);
+    on_move_cb(segment_id_, bid, prev_idx);
     return Iterator{bid, uint8_t(displace_index)};
   }

@@ -1597,9 +1615,9 @@ auto Segment<Key, Value, Policy>::FindValidStartingFrom(PhysicalBid bid, unsigne
 }

 template <typename Key, typename Value, typename Policy>
-template <typename BumpPolicy>
+template <typename BumpPolicy, typename OnMoveCb>
 auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_hash,
-                                         const BumpPolicy& bp) -> Iterator {
+                                         const BumpPolicy& bp, OnMoveCb&& on_move_cb) -> Iterator {
   auto& from = GetBucket(bid);

   if (!bp.CanBump(from.key[slot])) {
@@ -1624,6 +1642,7 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
     if (Iterator it = TryMoveFromStash(stash_pos, slot, key_hash); it.found()) {
       // TryMoveFromStash handles versions internally.
       from.Delete(slot);
+      on_move_cb(segment_id_, bid, it.index);
       return it;
     }

@@ -1690,12 +1709,14 @@ auto Segment<Key, Value, Policy>::BumpUp(uint8_t bid, SlotId slot, Hash_t key_ha
     swapb.SetStashPtr(stash_pos, swap_fp, bucket_ + next_bid);
   }

+  on_move_cb(segment_id_, bid, swap_bid);
+  on_move_cb(segment_id_, swap_bid, bid);
   return Iterator{swap_bid, kLastSlot};
 }

 template <typename Key, typename Value, typename Policy>
-template <typename HFunc>
-unsigned Segment<Key, Value, Policy>::UnloadStash(HFunc&& hfunc) {
+template <typename HFunc, typename OnMoveCb>
+unsigned Segment<Key, Value, Policy>::UnloadStash(HFunc&& hfunc, OnMoveCb&& on_move_cb) {
   unsigned moved = 0;

   for (unsigned i = 0; i < kStashBucketNum; ++i) {
@@ -1710,6 +1731,7 @@ unsigned Segment<Key, Value, Policy>::UnloadStash(HFunc&& hfunc) {
       if (res.found()) {
         ++moved;
         invalid_mask |= (1u << slot);
+        on_move_cb(segment_id_, i, res.index);
       }
     };

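
For context, here is a minimal, hypothetical caller-side sketch of the new OnMoveCb parameter. The segment/key setup, the hash value, and the equality-predicate shape are assumptions for illustration and are not part of this commit; only the callback shape mirrors the call sites above, which invoke it with plain bucket/segment ids of the displaced entry, so a generic variadic lambda covers every overload.

// Hypothetical usage sketch; names and setup are assumed, not taken from this commit.
// Count how many existing entries get displaced while inserting a new one.
size_t displaced = 0;
auto count_moves = [&displaced](auto&&... /* bucket/segment ids of the move */) { ++displaced; };

auto [it, inserted] = segment.Insert(key, value, key_hash,
                                     [&](const auto& k) { return k == key; },  // assumed predicate shape
                                     count_moves);

// When movement tracking is not needed, a no-op callback suffices; the commit itself
// uses this form when filling the freshly created segment during Split:
segment.Split(hash_fn, &dest_segment, [](auto&&...) {});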