@@ -282,6 +282,11 @@ impl TableLayout {
     }
 }
 
+/// A reference to an empty bucket into which an element can be inserted.
+pub struct InsertSlot {
+    index: usize,
+}
+
 /// A reference to a hash table bucket containing a `T`.
 ///
 /// This is usually just a pointer to the element itself. However if the element
@@ -1001,19 +1006,26 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     }
 
     /// Removes an element from the table, returning it.
+    ///
+    /// This also returns an `InsertSlot` pointing to the newly free bucket.
     #[cfg_attr(feature = "inline-more", inline)]
     #[allow(clippy::needless_pass_by_value)]
-    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
+    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
         self.erase_no_drop(&item);
-        item.read()
+        (
+            item.read(),
+            InsertSlot {
+                index: self.bucket_index(&item),
+            },
+        )
     }
 
     /// Finds and removes an element from the table, returning it.
     #[cfg_attr(feature = "inline-more", inline)]
     pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
         // Avoid `Option::map` because it bloats LLVM IR.
         match self.find(hash, eq) {
-            Some(bucket) => Some(unsafe { self.remove(bucket) }),
+            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
             None => None,
         }
     }
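
The extra `InsertSlot` makes remove-then-reinsert patterns cheaper: the caller already holds a known-free bucket for this hash and need not probe again. A minimal sketch of that pattern, assuming the crate's `raw` feature exports; `replace_value` is a hypothetical helper, and treating a slot from `remove` like one from `find_or_find_insert_slot` is my reading of the new doc comment, not something this diff states explicitly:

```rust
use hashbrown::raw::{Bucket, RawTable};

// Hypothetical illustration of the new `remove` signature: swap in a
// replacement value without probing for the key a second time.
// Caller guarantees `bucket` is a full bucket of `table` and `hash`
// is the hash of the element it contains.
unsafe fn replace_value<K, V>(
    table: &mut RawTable<(K, V)>,
    bucket: Bucket<(K, V)>,
    hash: u64,
    new: V,
) -> V {
    // `remove` now returns the freed slot alongside the value.
    let ((key, old), slot) = table.remove(bucket);
    // The probe sequence depends only on `hash`, so the bucket that just
    // became free is still a valid insertion point for the same hash,
    // provided nothing else mutates the table in between.
    table.insert_in_slot(hash, slot, (key, new));
    old
}
```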
@@ -1161,22 +1173,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
         unsafe {
-            let mut index = self.table.find_insert_slot(hash);
+            let mut slot = self.table.find_insert_slot(hash);
 
             // We can avoid growing the table once we have reached our load
             // factor if we are replacing a tombstone. This works since the
             // number of EMPTY slots does not change in this case.
-            let old_ctrl = *self.table.ctrl(index);
+            let old_ctrl = *self.table.ctrl(slot.index);
             if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
                 self.reserve(1, hasher);
-                index = self.table.find_insert_slot(hash);
+                slot = self.table.find_insert_slot(hash);
             }
 
-            self.table.record_item_insert_at(index, old_ctrl, hash);
-
-            let bucket = self.bucket(index);
-            bucket.write(value);
-            bucket
+            self.insert_in_slot(hash, slot, value)
         }
     }
 
@@ -1244,7 +1252,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
         let old_ctrl = *self.table.ctrl(index);
         debug_assert!(self.is_bucket_full(index));
         let old_growth_left = self.table.growth_left;
-        let item = self.remove(bucket);
+        let item = self.remove(bucket).0;
         if let Some(new_item) = f(item) {
             self.table.growth_left = old_growth_left;
             self.table.set_ctrl(index, old_ctrl);
@@ -1256,20 +1264,47 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
         }
     }
 
-    /// Searches for an element in the table,
-    /// or a potential slot where that element could be inserted.
+    /// Searches for an element in the table. If the element is not found,
+    /// returns `Err` with the position of a slot where an element with the
+    /// same hash could be inserted.
+    ///
+    /// This function may resize the table if additional space is required for
+    /// inserting an element.
     #[inline]
-    pub fn find_potential(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> (usize, bool) {
-        self.table.find_potential_inner(hash, &mut |index| unsafe {
-            eq(self.bucket(index).as_ref())
-        })
+    pub fn find_or_find_insert_slot(
+        &mut self,
+        hash: u64,
+        mut eq: impl FnMut(&T) -> bool,
+        hasher: impl Fn(&T) -> u64,
+    ) -> Result<Bucket<T>, InsertSlot> {
+        self.reserve(1, hasher);
+
+        match self
+            .table
+            .find_or_find_insert_slot_inner(hash, &mut |index| unsafe {
+                eq(self.bucket(index).as_ref())
+            }) {
+            Ok(index) => Ok(unsafe { self.bucket(index) }),
+            Err(slot) => Err(slot),
+        }
     }
 
-    /// Marks an element in the table as inserted.
+    /// Inserts a new element into the table in the given slot, and returns its
+    /// raw bucket.
+    ///
+    /// # Safety
+    ///
+    /// `slot` must point to a slot previously returned by
+    /// `find_or_find_insert_slot`, and no mutation of the table must have
+    /// occurred since that call.
     #[inline]
-    pub unsafe fn mark_inserted(&mut self, index: usize, hash: u64) {
-        let old_ctrl = *self.table.ctrl(index);
-        self.table.record_item_insert_at(index, old_ctrl, hash);
+    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
+        let old_ctrl = *self.table.ctrl(slot.index);
+        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
+
+        let bucket = self.bucket(slot.index);
+        bucket.write(value);
+        bucket
     }
 
     /// Searches for an element in the table.
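
Together, `find_or_find_insert_slot` and `insert_in_slot` replace the `find_potential`/`mark_inserted` pair with a single-probe, entry-style flow: hash once, search once, and fill the discovered slot only on a miss. A minimal caller-side sketch, assuming the `raw` feature's exports; the `upsert` helper and its use of `BuildHasher::hash_one` are illustrative, not part of this patch:

```rust
use core::hash::{BuildHasher, Hash};
use hashbrown::raw::RawTable;

// Hypothetical caller showing the intended find-or-insert flow.
fn upsert<K: Hash + Eq, V, S: BuildHasher>(
    table: &mut RawTable<(K, V)>,
    build_hasher: &S,
    key: K,
    value: V,
) {
    let hash = build_hasher.hash_one(&key);
    match table.find_or_find_insert_slot(
        hash,
        |(k, _)| *k == key,
        |(k, _)| build_hasher.hash_one(k),
    ) {
        // Hit: overwrite the existing value in place.
        Ok(bucket) => unsafe { bucket.as_mut().1 = value },
        // Miss: the method has already reserved room, so the slot stays
        // valid until the next mutation.
        // SAFETY: no mutation between the search and this insertion.
        Err(slot) => unsafe {
            table.insert_in_slot(hash, slot, (key, value));
        },
    }
}
```

Returning `Result<Bucket<T>, InsertSlot>` instead of `(usize, bool)` also makes misuse harder: the found and not-found cases carry different types, so a caller cannot accidentally treat a probe position as an occupied bucket.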
@@ -1608,7 +1643,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     /// Fixes up an insertion slot due to false positives for groups smaller than the group width.
     /// This must only be used on insertion slots found by `find_insert_slot_in_group`.
     #[inline]
-    unsafe fn fix_insert_slot(&self, index: usize) -> usize {
+    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
         // In tables smaller than the group width
         // (self.buckets() < Group::WIDTH), trailing control
         // bytes outside the range of the table are filled with
@@ -1636,12 +1671,11 @@ impl<A: Allocator + Clone> RawTableInner<A> {
             // with EMPTY bytes, so this second scan either finds an empty slot (due to the
             // load factor) or hits the trailing control bytes (containing EMPTY). See
             // `intrinsics::cttz_nonzero` for more information.
-            Group::load_aligned(self.ctrl(0))
+            index = Group::load_aligned(self.ctrl(0))
                 .match_empty_or_deleted()
-                .lowest_set_bit_nonzero()
-        } else {
-            index
+                .lowest_set_bit_nonzero();
         }
+        InsertSlot { index }
     }
 
     /// Finds the position to insert something in a group.
@@ -1663,11 +1697,11 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     /// This uses dynamic dispatch to reduce the amount of code generated, but that is
     /// eliminated by LLVM optimizations.
     #[inline]
-    pub fn find_potential_inner(
+    fn find_or_find_insert_slot_inner(
         &self,
         hash: u64,
         eq: &mut dyn FnMut(usize) -> bool,
-    ) -> (usize, bool) {
+    ) -> Result<usize, InsertSlot> {
         let mut insert_slot = None;
 
         let h2_hash = h2(hash);
@@ -1680,7 +1714,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                 let index = (probe_seq.pos + bit) & self.bucket_mask;
 
                 if likely(eq(index)) {
-                    return (index, true);
+                    return Ok(index);
                 }
             }
 
@@ -1697,7 +1731,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                 // least one. For tables smaller than the group width, there will still be an
                 // empty element in the current (and only) group due to the load factor.
                 unsafe {
-                    return (self.fix_insert_slot(insert_slot.unwrap_unchecked()), false);
+                    return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
                 }
             }
 
@@ -1711,7 +1745,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     /// There must be at least 1 empty bucket in the table.
     #[inline]
     unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
-        let index = self.find_insert_slot(hash);
+        let index = self.find_insert_slot(hash).index;
         let old_ctrl = *self.ctrl(index);
         self.set_ctrl_h2(index, hash);
         (index, old_ctrl)
@@ -1739,7 +1773,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     ///
     /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    fn find_insert_slot(&self, hash: u64) -> usize {
+    fn find_insert_slot(&self, hash: u64) -> InsertSlot {
         let mut probe_seq = self.probe_seq(hash);
         loop {
             // SAFETY:
@@ -1922,7 +1956,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     #[cfg(feature = "raw")]
     #[inline]
     unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
-        let index = self.find_insert_slot(hash);
+        let index = self.find_insert_slot(hash).index;
         let old_ctrl = *self.ctrl(index);
         if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
             Err(())
@@ -2293,7 +2327,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
             let hash = hasher(*guard, i);
 
             // Search for a suitable place to put it
-            let new_i = guard.find_insert_slot(hash).index;
+            let new_i = guard.find_insert_slot(hash).index;
 
             // Probing works by scanning through all of the control
             // bytes in groups, which may not be aligned to the group
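
For downstream users of the `raw` API this is a breaking change, but the migration is mechanical: the `(usize, bool)` tuple becomes a `Result`, and `mark_inserted` plus a manual `bucket.write` collapse into `insert_in_slot`. A before/after sketch; `insert_if_absent` is a hypothetical caller, and the "before" body is reconstructed from the code this diff removes:

```rust
use hashbrown::raw::RawTable;

// Hypothetical downstream caller: insert `value` only if no equal element
// is present; returns true if an insertion happened.
fn insert_if_absent<T>(
    table: &mut RawTable<T>,
    hash: u64,
    eq: impl FnMut(&T) -> bool,
    hasher: impl Fn(&T) -> u64,
    value: T,
) -> bool {
    // Before this patch (illustrative):
    //     table.reserve(1, &hasher);
    //     let (index, found) = table.find_potential(hash, eq);
    //     if !found {
    //         unsafe {
    //             table.mark_inserted(index, hash);
    //             table.bucket(index).write(value);
    //         }
    //     }
    //     !found
    //
    // After: found/not-found is encoded in the Result, and the explicit
    // `reserve` call goes away because `find_or_find_insert_slot`
    // reserves internally.
    match table.find_or_find_insert_slot(hash, eq, hasher) {
        Ok(_occupied) => false,
        Err(slot) => unsafe {
            // SAFETY: `slot` came from `find_or_find_insert_slot` above
            // and the table has not been mutated since.
            table.insert_in_slot(hash, slot, value);
            true
        },
    }
}
```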