@@ -299,12 +299,12 @@ impl<T> Clone for Bucket<T> {
299
299
impl < T > Bucket < T > {
300
300
const IS_ZERO_SIZED_TYPE : bool = mem:: size_of :: < T > ( ) == 0 ;
301
301
302
- /// Create [`Bucket`] that contain pointer to the data.
303
- /// The pointer calculation is performed by calculation the
302
+ /// Creates a [`Bucket`] that contains a pointer to the data.
303
+ /// The pointer calculation is performed by calculating the
304
304
/// offset from given `base` pointer (convenience for
305
305
/// `base.as_ptr().sub(index)`).
306
306
///
307
- /// `index` is in units of `T`; e.g., a `index` of 3 represents a pointer
307
+ /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
308
308
/// offset of `3 * size_of::<T>()` bytes.
309
309
///
310
310
/// If the `T` is a ZST, then we instead track the index of the element
@@ -313,16 +313,16 @@ impl<T> Bucket<T> {
313
313
///
314
314
/// # Safety
315
315
///
316
- /// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
317
- /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
316
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
317
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
318
318
/// rules of [`NonNull::new_unchecked`] function.
319
319
///
320
- /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
320
+ /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
321
321
/// and [`NonNull::new_unchecked`] function, as well as for the correct
322
322
/// logic of the work of this crate, the following rules are necessary and
323
323
/// sufficient:
324
324
///
325
- /// * `base` cantained pointer must not be `dangling` and must points to the
325
+ /// * the `base` pointer must not be `dangling` and must point to the
326
326
/// end of the first `value element` from the `data part` of the table, i.e.
327
327
/// must be the pointer that returned by [`RawTable::data_end`] or by
328
328
/// [`RawTableInner::data_end<T>`];
@@ -347,29 +347,29 @@ impl<T> Bucket<T> {
347
347
/// [`RawTableInner::buckets`]: crate::raw::RawTableInner::buckets
348
348
#[ inline]
349
349
unsafe fn from_base_index ( base : NonNull < T > , index : usize ) -> Self {
350
- // If mem::size_of::<T>() != 0 than return a pointer to a `element` in
350
+ // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
351
351
// the data part of the table (we start counting from "0", so that
352
352
// in the expression T[last], the "last" index actually one less than the
353
353
// "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
354
354
//
355
- // `from_base_index(base, 1).as_ptr()` returns pointer that
356
- // points here in tha data part of the table
355
+ // `from_base_index(base, 1).as_ptr()` returns a pointer that
356
+ // points here in the data part of the table
357
357
// (to the start of T1)
358
358
// |
359
- // | `base: NonNull<T>` must points here
359
+ // | `base: NonNull<T>` must point here
360
360
// | (to the end of T0 or to the start of C0)
361
361
// v v
362
362
// [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
363
363
// ^
364
- // `from_base_index(base, 1)` returns pointer
365
- // that points here in tha data part of the table
364
+ // `from_base_index(base, 1)` returns a pointer
365
+ // that points here in the data part of the table
366
366
// (to the end of T1)
367
367
//
368
368
// where: T0...Tlast - our stored data; C0...Clast - control bytes
369
369
// or metadata for data.
370
370
let ptr = if Self :: IS_ZERO_SIZED_TYPE {
371
371
// won't overflow because index must be less than length (bucket_mask)
372
- // and bucket_mask guaranteed less than `isize::MAX`
372
+ // and bucket_mask is guaranteed to be less than `isize::MAX`
373
373
// (see TableLayout::calculate_layout_for method)
374
374
( index + 1 ) as * mut T
375
375
} else {
@@ -380,31 +380,31 @@ impl<T> Bucket<T> {
380
380
}
381
381
}
382
382
383
- /// Calculates the index of [`Bucket`] as distance between two pointers
383
+ /// Calculates the index of a [`Bucket`] as distance between two pointers
384
384
/// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
385
385
/// The returned value is in units of T: the distance in bytes divided by
386
386
/// [`core::mem::size_of::<T>()`].
387
387
///
388
- /// If the `T` is a ZST, then we instead return the index of the element in
389
- /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`)
388
+ /// If the `T` is a ZST, then we return the index of the element in
389
+ /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
390
390
///
391
391
/// This function is the inverse of [`from_base_index`].
392
392
///
393
393
/// # Safety
394
394
///
395
- /// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
395
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
396
396
/// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
397
397
///
398
398
/// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
399
399
/// method, as well as for the correct logic of the work of this crate, the
400
400
/// following rules are necessary and sufficient:
401
401
///
402
- /// * `base` cantained pointer must not be `dangling` and must points to the
402
+ /// * the pointer contained in `base` must not be `dangling` and must point to the
403
403
/// end of the first `element` from the `data part` of the table, i.e.
404
- /// must be the pointer that returned by [`RawTable::data_end`] or by
404
+ /// must be the pointer that is returned by [`RawTable::data_end`] or by
405
405
/// [`RawTableInner::data_end<T>`];
406
406
///
407
- /// * `self` also must not contains dangling pointer;
407
+ /// * `self` also must not contain a dangling pointer;
408
408
///
409
409
/// * both `self` and `base` must be created from the same [`RawTable`]
410
410
/// (or [`RawTableInner`]).
@@ -420,15 +420,15 @@ impl<T> Bucket<T> {
420
420
/// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
421
421
#[ inline]
422
422
unsafe fn to_base_index ( & self , base : NonNull < T > ) -> usize {
423
- // If mem::size_of::<T>() != 0 than return a index under which we used to store the
423
+ // If mem::size_of::<T>() != 0 then return an index under which we used to store the
424
424
// `element` in the data part of the table (we start counting from "0", so
425
- // that in the expression T[last], the "last" index actually one less than the
425
+ // that in the expression T[last], the "last" index actually is one less than the
426
426
// "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
427
- // For example for 5th element in table calculation performed like this:
427
+ // For example for 5th element in table calculation is performed like this:
428
428
//
429
429
// mem::size_of::<T>()
430
430
// |
431
- // | `self = from_base_index(base, 5)` that return pointer
431
+ // | `self = from_base_index(base, 5)` that returns a pointer
432
432
// | that points here in tha data part of the table
433
433
// | (to the end of T5)
434
434
// | | `base: NonNull<T>` must point here
@@ -453,13 +453,13 @@ impl<T> Bucket<T> {
453
453
///
454
454
/// # Note
455
455
///
456
- /// If `T` is not [`Copy`], do not use `*mut T` methods that can case of calling the
456
+ /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
457
457
/// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
458
- /// for properly dropping the data we need also to clear `data` control bytes. If we
459
- /// drop data, but do not clear `data control byte` it lead to double drop when
458
+ /// for properly dropping the data we also need to clear `data` control bytes. If we
459
+ /// drop data, but do not clear `data control byte` it leads to double drop when
460
460
/// [`RawTable`] goes out of scope.
461
461
///
462
- /// If you modified an already initialized `value`, so [`Hash`] and [`Eq`] on the new
462
+ /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
463
463
/// `T` value and its borrowed form *must* match those for the old `T` value, as the map
464
464
/// will not re-evaluate where the new value should go, meaning the value may become
465
465
/// "lost" if their location does not reflect their state.
@@ -514,7 +514,7 @@ impl<T> Bucket<T> {
514
514
}
515
515
516
516
/// Create a new [`Bucket`] that is offset from the `self` by the given
517
- /// `offset`. The pointer calculation is performed by calculation the
517
+ /// `offset`. The pointer calculation is performed by calculating the
518
518
/// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
519
519
/// This function is used for iterators.
520
520
///
@@ -523,7 +523,7 @@ impl<T> Bucket<T> {
523
523
///
524
524
/// # Safety
525
525
///
526
- /// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
526
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
527
527
/// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
528
528
/// rules of [`NonNull::new_unchecked`] function.
529
529
///
@@ -532,7 +532,7 @@ impl<T> Bucket<T> {
532
532
/// logic of the work of this crate, the following rules are necessary and
533
533
/// sufficient:
534
534
///
535
- /// * `self` cantained pointer must not be `dangling`;
535
+ /// * the pointer contained in `self` must not be `dangling`;
536
536
///
537
537
/// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
538
538
/// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other
@@ -571,7 +571,7 @@ impl<T> Bucket<T> {
571
571
/// You should use [`RawTable::erase`] instead of this function,
572
572
/// or be careful with calling this function directly, because for
573
573
/// properly dropping the data we need also clear `data` control bytes.
574
- /// If we drop data, but do not erase `data control byte` it lead to
574
+ /// If we drop data, but do not erase `data control byte` it leads to
575
575
/// double drop when [`RawTable`] goes out of scope.
576
576
///
577
577
/// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
@@ -592,7 +592,7 @@ impl<T> Bucket<T> {
592
592
/// You should use [`RawTable::remove`] instead of this function,
593
593
/// or be careful with calling this function directly, because compiler
594
594
/// calls its destructor when readed `value` goes out of scope. It
595
- /// can cause double dropping when [`RawTable`] also goes out of scope,
595
+ /// can cause double dropping when [`RawTable`] goes out of scope,
596
596
/// because of not erased `data control byte`.
597
597
///
598
598
/// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
@@ -620,52 +620,6 @@ impl<T> Bucket<T> {
620
620
/// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
621
621
/// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
622
622
/// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
623
- ///
624
- /// # Examples
625
- ///
626
- /// ```
627
- /// # #[cfg(feature = "raw")]
628
- /// # fn test() {
629
- /// use core::hash::{BuildHasher, Hash};
630
- /// use hashbrown::raw::{Bucket, RawTable};
631
- ///
632
- /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
633
- ///
634
- /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
635
- /// use core::hash::Hasher;
636
- /// let mut state = hash_builder.build_hasher();
637
- /// key.hash(&mut state);
638
- /// state.finish()
639
- /// }
640
- ///
641
- /// let hash_builder = NewHashBuilder::default();
642
- /// let mut table = RawTable::new();
643
- ///
644
- /// let value: (String, String) = ("One".to_owned(), "First".to_owned());
645
- /// let hash = make_hash(&hash_builder, &value.0);
646
- ///
647
- /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
648
- ///
649
- /// let bucket: Bucket<(String, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
650
- ///
651
- /// // First properly drop old value
652
- /// unsafe { bucket.as_ptr().drop_in_place() };
653
- ///
654
- /// // Than replase with new one with the "same hash and equality function"
655
- /// unsafe { bucket.write(("One".to_owned(), "is a number".to_owned())) };
656
- ///
657
- /// let bucket: Bucket<(String, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
658
- ///
659
- /// assert_eq!(
660
- /// unsafe { &*bucket.as_ptr() },
661
- /// &("One".to_owned(), "is a number".to_owned())
662
- /// );
663
- /// # }
664
- /// # fn main() {
665
- /// # #[cfg(feature = "raw")]
666
- /// # test()
667
- /// # }
668
- /// ```
669
623
#[ inline]
670
624
pub ( crate ) unsafe fn write ( & self , val : T ) {
671
625
self . as_ptr ( ) . write ( val) ;
0 commit comments