Skip to content

Commit 136fccb

Browse files
committed
Apply suggestions
1 parent 9bd3c02 commit 136fccb

File tree

1 file changed

+34
-80
lines changed

1 file changed

+34
-80
lines changed

src/raw/mod.rs

Lines changed: 34 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -299,12 +299,12 @@ impl<T> Clone for Bucket<T> {
299299
impl<T> Bucket<T> {
300300
const IS_ZERO_SIZED_TYPE: bool = mem::size_of::<T>() == 0;
301301

302-
/// Create [`Bucket`] that contain pointer to the data.
303-
/// The pointer calculation is performed by calculation the
302+
/// Creates a [`Bucket`] that contains a pointer to the data.
303+
/// The pointer calculation is performed by calculating the
304304
/// offset from given `base` pointer (convenience for
305305
/// `base.as_ptr().sub(index)`).
306306
///
307-
/// `index` is in units of `T`; e.g., a `index` of 3 represents a pointer
307+
/// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
308308
/// offset of `3 * size_of::<T>()` bytes.
309309
///
310310
/// If the `T` is a ZST, then we instead track the index of the element
@@ -313,16 +313,16 @@ impl<T> Bucket<T> {
313313
///
314314
/// # Safety
315315
///
316-
/// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
317-
/// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
316+
/// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
317+
/// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
318318
/// rules of [`NonNull::new_unchecked`] function.
319319
///
320-
/// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
320+
/// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
321321
/// and [`NonNull::new_unchecked`] function, as well as for the correct
322322
/// logic of the work of this crate, the following rules are necessary and
323323
/// sufficient:
324324
///
325-
/// * `base` cantained pointer must not be `dangling` and must points to the
325+
/// * the `base` pointer must not be `dangling` and must point to the
326326
/// end of the first `value element` from the `data part` of the table, i.e.
327327
/// must be the pointer that is returned by [`RawTable::data_end`] or by
328328
/// [`RawTableInner::data_end<T>`];
@@ -347,29 +347,29 @@ impl<T> Bucket<T> {
347347
/// [`RawTableInner::buckets`]: crate::raw::RawTableInner::buckets
348348
#[inline]
349349
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
350-
// If mem::size_of::<T>() != 0 than return a pointer to a `element` in
350+
// If mem::size_of::<T>() != 0 then return a pointer to an `element` in
351351
// the data part of the table (we start counting from "0", so that
352352
// in the expression T[last], the "last" index actually is one less than the
353353
// "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
354354
//
355-
// `from_base_index(base, 1).as_ptr()` returns pointer that
356-
// points here in tha data part of the table
355+
// `from_base_index(base, 1).as_ptr()` returns a pointer that
356+
// points here in the data part of the table
357357
// (to the start of T1)
358358
// |
359-
// | `base: NonNull<T>` must points here
359+
// | `base: NonNull<T>` must point here
360360
// | (to the end of T0 or to the start of C0)
361361
// v v
362362
// [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
363363
// ^
364-
// `from_base_index(base, 1)` returns pointer
365-
// that points here in tha data part of the table
364+
// `from_base_index(base, 1)` returns a pointer
365+
// that points here in the data part of the table
366366
// (to the end of T1)
367367
//
368368
// where: T0...Tlast - our stored data; C0...Clast - control bytes
369369
// or metadata for data.
370370
let ptr = if Self::IS_ZERO_SIZED_TYPE {
371371
// won't overflow because index must be less than length (bucket_mask)
372-
// and bucket_mask guaranteed less than `isize::MAX`
372+
// and bucket_mask is guaranteed to be less than `isize::MAX`
373373
// (see TableLayout::calculate_layout_for method)
374374
(index + 1) as *mut T
375375
} else {
@@ -380,31 +380,31 @@ impl<T> Bucket<T> {
380380
}
381381
}
382382

383-
/// Calculates the index of [`Bucket`] as distance between two pointers
383+
/// Calculates the index of a [`Bucket`] as distance between two pointers
384384
/// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
385385
/// The returned value is in units of T: the distance in bytes divided by
386386
/// [`core::mem::size_of::<T>()`].
387387
///
388-
/// If the `T` is a ZST, then we instead return the index of the element in
389-
/// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`)
388+
/// If the `T` is a ZST, then we return the index of the element in
389+
/// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
390390
///
391391
/// This function is the inverse of [`from_base_index`].
392392
///
393393
/// # Safety
394394
///
395-
/// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
395+
/// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
396396
/// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
397397
///
398398
/// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
399399
/// method, as well as for the correct logic of the work of this crate, the
400400
/// following rules are necessary and sufficient:
401401
///
402-
/// * `base` cantained pointer must not be `dangling` and must points to the
402+
/// * the pointer contained in `base` must not be `dangling` and must point to the
403403
/// end of the first `element` from the `data part` of the table, i.e.
404-
/// must be the pointer that returned by [`RawTable::data_end`] or by
404+
/// must be a pointer that is returned by [`RawTable::data_end`] or by
405405
/// [`RawTableInner::data_end<T>`];
406406
///
407-
/// * `self` also must not contains dangling pointer;
407+
/// * `self` also must not contain a dangling pointer;
408408
///
409409
/// * both `self` and `base` must be created from the same [`RawTable`]
410410
/// (or [`RawTableInner`]).
@@ -420,15 +420,15 @@ impl<T> Bucket<T> {
420420
/// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
421421
#[inline]
422422
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
423-
// If mem::size_of::<T>() != 0 than return a index under which we used to store the
423+
// If mem::size_of::<T>() != 0 then return an index under which we used to store the
424424
// `element` in the data part of the table (we start counting from "0", so
425-
// that in the expression T[last], the "last" index actually one less than the
425+
// that in the expression T[last], the "last" index actually is one less than the
426426
// "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
427-
// For example for 5th element in table calculation performed like this:
427+
// For example for 5th element in table calculation is performed like this:
428428
//
429429
// mem::size_of::<T>()
430430
// |
431-
// | `self = from_base_index(base, 5)` that return pointer
431+
// | `self = from_base_index(base, 5)` that returns a pointer
432432
// | that points here in the data part of the table
433433
// | (to the end of T5)
434434
// | | `base: NonNull<T>` must point here
@@ -453,13 +453,13 @@ impl<T> Bucket<T> {
453453
///
454454
/// # Note
455455
///
456-
/// If `T` is not [`Copy`], do not use `*mut T` methods that can case of calling the
456+
/// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
457457
/// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
458-
/// for properly dropping the data we need also to clear `data` control bytes. If we
459-
/// drop data, but do not clear `data control byte` it lead to double drop when
458+
/// for properly dropping the data we also need to clear `data` control bytes. If we
459+
/// drop data, but do not clear `data control byte` it leads to double drop when
460460
/// [`RawTable`] goes out of scope.
461461
///
462-
/// If you modified an already initialized `value`, so [`Hash`] and [`Eq`] on the new
462+
/// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
463463
/// `T` value and its borrowed form *must* match those for the old `T` value, as the map
464464
/// will not re-evaluate where the new value should go, meaning the value may become
465465
/// "lost" if their location does not reflect their state.
@@ -514,7 +514,7 @@ impl<T> Bucket<T> {
514514
}
515515

516516
/// Create a new [`Bucket`] that is offset from the `self` by the given
517-
/// `offset`. The pointer calculation is performed by calculation the
517+
/// `offset`. The pointer calculation is performed by calculating the
518518
/// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
519519
/// This function is used for iterators.
520520
///
@@ -523,7 +523,7 @@ impl<T> Bucket<T> {
523523
///
524524
/// # Safety
525525
///
526-
/// If `mem::size_of::<T>() != 0`, than the safety rules are directly derived
526+
/// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
527527
/// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
528528
/// rules of [`NonNull::new_unchecked`] function.
529529
///
@@ -532,7 +532,7 @@ impl<T> Bucket<T> {
532532
/// logic of the work of this crate, the following rules are necessary and
533533
/// sufficient:
534534
///
535-
/// * `self` cantained pointer must not be `dangling`;
535+
/// * the pointer contained in `self` must not be `dangling`;
536536
///
537537
/// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
538538
/// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
@@ -571,7 +571,7 @@ impl<T> Bucket<T> {
571571
/// You should use [`RawTable::erase`] instead of this function,
572572
/// or be careful with calling this function directly, because for
573573
/// properly dropping the data we also need to clear `data` control bytes.
574-
/// If we drop data, but do not erase `data control byte` it lead to
574+
/// If we drop data, but do not erase `data control byte` it leads to
575575
/// double drop when [`RawTable`] goes out of scope.
576576
///
577577
/// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
@@ -592,7 +592,7 @@ impl<T> Bucket<T> {
592592
/// You should use [`RawTable::remove`] instead of this function,
593593
/// or be careful with calling this function directly, because compiler
594594
/// calls its destructor when the read `value` goes out of scope. It
595-
/// can cause double dropping when [`RawTable`] also goes out of scope,
595+
/// can cause double dropping when [`RawTable`] goes out of scope,
596596
/// because of not erased `data control byte`.
597597
///
598598
/// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
@@ -620,52 +620,6 @@ impl<T> Bucket<T> {
620620
/// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
621621
/// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
622622
/// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
623-
///
624-
/// # Examples
625-
///
626-
/// ```
627-
/// # #[cfg(feature = "raw")]
628-
/// # fn test() {
629-
/// use core::hash::{BuildHasher, Hash};
630-
/// use hashbrown::raw::{Bucket, RawTable};
631-
///
632-
/// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
633-
///
634-
/// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
635-
/// use core::hash::Hasher;
636-
/// let mut state = hash_builder.build_hasher();
637-
/// key.hash(&mut state);
638-
/// state.finish()
639-
/// }
640-
///
641-
/// let hash_builder = NewHashBuilder::default();
642-
/// let mut table = RawTable::new();
643-
///
644-
/// let value: (String, String) = ("One".to_owned(), "First".to_owned());
645-
/// let hash = make_hash(&hash_builder, &value.0);
646-
///
647-
/// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
648-
///
649-
/// let bucket: Bucket<(String, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
650-
///
651-
/// // First properly drop old value
652-
/// unsafe { bucket.as_ptr().drop_in_place() };
653-
///
654-
/// // Than replase with new one with the "same hash and equality function"
655-
/// unsafe { bucket.write(("One".to_owned(), "is a number".to_owned())) };
656-
///
657-
/// let bucket: Bucket<(String, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
658-
///
659-
/// assert_eq!(
660-
/// unsafe { &*bucket.as_ptr() },
661-
/// &("One".to_owned(), "is a number".to_owned())
662-
/// );
663-
/// # }
664-
/// # fn main() {
665-
/// # #[cfg(feature = "raw")]
666-
/// # test()
667-
/// # }
668-
/// ```
669623
#[inline]
670624
pub(crate) unsafe fn write(&self, val: T) {
671625
self.as_ptr().write(val);

0 commit comments

Comments
 (0)