
Commit 91390f3

Simplify Clone
1 parent b2190e2 commit 91390f3

2 files changed: +189 -24


src/raw/mod.rs

Lines changed: 188 additions & 24 deletions
@@ -1,5 +1,5 @@
 use crate::alloc::alloc::{handle_alloc_error, Layout};
-use crate::scopeguard::{guard, ScopeGuard};
+use crate::scopeguard::guard;
 use crate::TryReserveError;
 use core::iter::FusedIterator;
 use core::marker::PhantomData;
@@ -2494,7 +2494,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
         } else {
             unsafe {
                 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
-                let new_table = match Self::new_uninitialized(
+                //
+                // SAFETY: This is safe as we are taking the size of an already allocated table
+                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
+                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
+                let mut new_table = match Self::new_uninitialized(
                     self.table.alloc.clone(),
                     self.table.buckets(),
                     Fallibility::Infallible,
@@ -2503,29 +2507,29 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                     Err(_) => hint::unreachable_unchecked(),
                 };

-                // If cloning fails then we need to free the allocation for the
-                // new table. However we don't run its drop since its control
-                // bytes are not initialized yet.
-                let mut guard = guard(ManuallyDrop::new(new_table), |new_table| {
-                    new_table.free_buckets();
-                });
-
-                guard.clone_from_spec(self);
-
-                // Disarm the scope guard and return the newly created table.
-                ManuallyDrop::into_inner(ScopeGuard::into_inner(guard))
+                // Cloning elements may fail (the clone function may panic). But we don't
+                // need to worry about uninitialized control bits, since:
+                // 1. The number of items (elements) in the table is zero, which means that
+                //    the control bits will not be read by the `Drop` function.
+                // 2. The `clone_from_spec` method will first copy all control bits from
+                //    `self` (thus initializing them). But this will not affect `Drop`,
+                //    since `clone_from_spec` sets `items` only after successfully
+                //    cloning all elements.
+                new_table.clone_from_spec(self);
+                new_table
             }
         }
     }

     fn clone_from(&mut self, source: &Self) {
         if source.table.is_empty_singleton() {
+            // Dereferencing drops the old `self` table.
             *self = Self::new_in(self.table.alloc.clone());
         } else {
             unsafe {
                 // Make sure that if any panic occurs, we clear the table and
                 // leave it in an empty state.
-                let mut self_ = guard(self, |self_| {
+                let mut guard = guard(&mut *self, |self_| {
                     self_.clear_no_drop();
                 });

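A note on the `guard(&mut *self, ...)` reborrow above: `guard` takes its first argument by value, so passing `self` outright (as the removed line did) moves the `&mut` into the guard, and only `ScopeGuard::into_inner` could hand it back. Reborrowing instead leaves `self` usable again once the guard is forgotten in the next hunk. A tiny standalone illustration of the difference, with a hypothetical `consume` function standing in for `guard`:

// `consume` stands in for `guard`: it takes its argument by value, so
// passing a `&mut` reference outright would move it.
fn consume<T>(_value: T) {}

fn demo(v: &mut Vec<i32>) {
    // `consume(v)` would move the `&mut Vec<i32>` itself, making any later
    // use of `v` in this function a compile error. `&mut *v` creates a
    // shorter-lived reborrow, so `v` becomes usable again afterwards.
    consume(&mut *v);
    v.push(1);
}

fn main() {
    let mut v = Vec::new();
    demo(&mut v);
    assert_eq!(v, [1]);
}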
@@ -2535,18 +2539,32 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                 //
                 // This leak is unavoidable: we can't try dropping more elements
                 // since this could lead to another panic and abort the process.
-                self_.drop_elements();
+                //
+                // SAFETY: We clear our table right after dropping the elements,
+                // so there is no double drop, since `items` will be equal to zero.
+                guard.drop_elements();
+
+                // Okay, we've successfully dropped all elements, so we'll just set
+                // `items` to zero (so that the `Drop` of `RawTable` doesn't try to
+                // drop all elements twice) and just forget about the guard.
+                guard.table.items = 0;
+                mem::forget(guard);

                 // If necessary, resize our table to match the source.
-                if self_.buckets() != source.buckets() {
+                if self.buckets() != source.buckets() {
                     // Skip our drop by using ptr::write.
-                    if !self_.table.is_empty_singleton() {
-                        self_.free_buckets();
+                    if !self.table.is_empty_singleton() {
+                        // SAFETY: We have verified that the table is allocated.
+                        self.free_buckets();
                     }
-                    (&mut **self_ as *mut Self).write(
+                    (self as *mut Self).write(
                         // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
+                        //
+                        // SAFETY: This is safe as we are taking the size of an already allocated table
+                        // and therefore capacity overflow cannot occur, `source.buckets()` is a power
+                        // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
                         match Self::new_uninitialized(
-                            self_.table.alloc.clone(),
+                            self.table.alloc.clone(),
                             source.buckets(),
                             Fallibility::Infallible,
                         ) {
@@ -2556,10 +2574,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                     );
                 }

-                self_.clone_from_spec(source);
-
-                // Disarm the scope guard if cloning was successful.
-                ScopeGuard::into_inner(self_);
+                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
+                // inside the `clone_from_impl` function will take care of that, dropping all
+                // cloned elements if necessary. The `Drop` of `RawTable` takes care of the rest
+                // by freeing up the allocated memory.
+                self.clone_from_spec(source);
             }
         }
     }
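The `(self as *mut Self).write(...)` call above leans on the semantics of `ptr::write`: it overwrites the destination without reading or dropping the old value, which matters here because the old table's buckets were already freed by hand. A minimal, self-contained sketch of this "skip our drop" trick (the `Noisy` type is illustrative, not part of hashbrown):

use core::ptr;

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let mut slot = Noisy("old");
    // A plain assignment `slot = Noisy("new");` would drop "old" first.
    // `ptr::write` stores the new value without reading or dropping the old
    // one -- which is what `clone_from` needs after it has already freed the
    // old table's buckets by hand.
    unsafe {
        ptr::write(&mut slot, Noisy("new"));
    }
    // Only "new" is dropped at the end of scope.
}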
@@ -3373,4 +3392,149 @@ mod test_map {
             assert!(table.find(i + 100, |x| *x == i + 100).is_none());
         }
     }
+
+    #[test]
+    #[should_panic = "panic in clone"]
+    fn test_clone_memory_leaks_and_double_drop() {
+        use ::alloc::vec::Vec;
+
+        const DISARMED: bool = false;
+        const ARMED: bool = true;
+
+        struct CheckedCloneDrop {
+            panic_in_clone: bool,
+            dropped: bool,
+            need_drop: Vec<i32>,
+        }
+
+        impl Clone for CheckedCloneDrop {
+            fn clone(&self) -> Self {
+                if self.panic_in_clone {
+                    panic!("panic in clone")
+                }
+                Self {
+                    panic_in_clone: self.panic_in_clone,
+                    dropped: self.dropped,
+                    need_drop: self.need_drop.clone(),
+                }
+            }
+        }
+
+        impl Drop for CheckedCloneDrop {
+            fn drop(&mut self) {
+                if self.dropped {
+                    panic!("double drop");
+                }
+                self.dropped = true;
+            }
+        }
+
+        let mut table: RawTable<(u64, CheckedCloneDrop)> = RawTable::with_capacity(7);
+
+        let armed_flags = [
+            DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+        ];
+
+        // The hash and the key must be equal to each other to control element
+        // placement, so that we can be sure that we first clone the elements
+        // that don't panic during cloning.
+        for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
+            let idx = idx as u64;
+            table.insert(
+                idx,
+                (
+                    idx,
+                    CheckedCloneDrop {
+                        panic_in_clone,
+                        dropped: false,
+                        need_drop: vec![0, 1, 2, 3],
+                    },
+                ),
+                |(k, _)| *k,
+            );
+        }
+        let mut count = 0;
+        unsafe {
+            // Let's check that all elements are located as we wanted.
+            //
+            // SAFETY: We know for sure that `RawTable` will outlive
+            // the returned `RawIter` iterator.
+            for (bucket, panic_in_clone) in table.iter().zip(armed_flags) {
+                // SAFETY: The bucket comes from `RawIter`, so it is safe to read it.
+                let (key, value) = bucket.as_ref();
+                assert_eq!(*key, count);
+                assert_eq!(value.panic_in_clone, panic_in_clone);
+                count += 1;
+            }
+        }
+        assert_eq!(table.len(), 7);
+        assert_eq!(count, 7);
+
+        // Clone should normally clone a few elements, and then (when the clone
+        // function panics), deallocate both its own memory and the memory of
+        // already cloned elements (the `Vec<i32>` memory inside `CheckedCloneDrop`).
+        let _table2 = table.clone();
+    }
+
+    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
+    /// AN UNINITIALIZED TABLE DURING THE DROP
+    #[test]
+    fn test_drop_uninitialized() {
+        use ::alloc::vec::Vec;
+
+        let table = unsafe {
+            // SAFETY: The number of buckets is a power of two and we're not
+            // trying to actually use the returned RawTable.
+            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+                .unwrap()
+        };
+        drop(table);
+    }
+
+    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF `ITEMS`
+    /// IS ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
+    #[test]
+    fn test_drop_zero_items() {
+        use ::alloc::vec::Vec;
+        unsafe {
+            // SAFETY: The number of buckets is a power of two and we're not
+            // trying to actually use the returned RawTable.
+            let table =
+                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+                    .unwrap();
+
+            // WE SIMULATE, AS IT WERE, A FULL TABLE.
+
+            // SAFETY: We checked that the table is allocated, and an allocated table already has
+            // `self.bucket_mask + 1 + Group::WIDTH` control bytes (see `TableLayout::calculate_layout_for`),
+            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
+            table
+                .table
+                .ctrl(0)
+                .write_bytes(EMPTY, table.table.num_ctrl_bytes());
+
+            // SAFETY: `table.capacity()` is guaranteed to be smaller than `table.buckets()`.
+            table.table.ctrl(0).write_bytes(0, table.capacity());
+
+            // Fix up the trailing control bytes. See the comments in `set_ctrl`
+            // for the handling of tables smaller than the group width.
+            if table.buckets() < Group::WIDTH {
+                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
+                // so copying `self.buckets() == self.bucket_mask + 1` bytes with an
+                // offset equal to `Group::WIDTH` is safe.
+                table
+                    .table
+                    .ctrl(0)
+                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
+            } else {
+                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control
+                // bytes, so copying `Group::WIDTH` bytes with an offset equal
+                // to `self.buckets() == self.bucket_mask + 1` is safe.
+                table
+                    .table
+                    .ctrl(0)
+                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
+            }
+            drop(table);
+        }
+    }
 }
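For context on why `test_drop_zero_items` can "simulate a full table" by writing zero bytes: in hashbrown's control-byte encoding, `EMPTY` and `DELETED` both have the high bit set, while a full bucket stores seven bits of the hash with the high bit clear, so `0x00` reads as full. A minimal sketch of that encoding (the constants and the `is_full` predicate mirror the ones in src/raw/mod.rs):

// Control byte values as defined in src/raw/mod.rs.
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

/// A bucket is full when the high bit of its control byte is clear.
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

fn main() {
    assert!(!is_full(EMPTY));
    assert!(!is_full(DELETED));
    // This is why the test writes zero bytes over the first
    // `table.capacity()` control bytes: 0x00 reads as a full bucket.
    assert!(is_full(0x00));
}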

src/scopeguard.rs

Lines changed: 1 addition & 0 deletions
@@ -25,6 +25,7 @@ impl<T, F> ScopeGuard<T, F>
 where
     F: FnMut(&mut T),
 {
+    #[allow(dead_code)]
     #[inline]
     pub fn into_inner(guard: Self) -> T {
         // Cannot move out of Drop-implementing types, so
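With this commit, `clone` and `clone_from` in src/raw/mod.rs no longer call `ScopeGuard::into_inner` (disarming is now done with `mem::forget` or by simply not creating a guard), so the method is kept behind `#[allow(dead_code)]`. For reference, a minimal sketch of the scopeguard pattern, assuming the same shape as src/scopeguard.rs:

use core::mem::ManuallyDrop;
use core::ops::{Deref, DerefMut};
use core::ptr;

// Run `dropfn` over `value` when the guard is dropped, unless the guard
// was disarmed first.
struct ScopeGuard<T, F: FnMut(&mut T)> {
    dropfn: F,
    value: T,
}

fn guard<T, F: FnMut(&mut T)>(value: T, dropfn: F) -> ScopeGuard<T, F> {
    ScopeGuard { dropfn, value }
}

impl<T, F: FnMut(&mut T)> ScopeGuard<T, F> {
    // Disarm the guard: hand back the value without running `dropfn`.
    fn into_inner(guard: Self) -> T {
        // Cannot move a field out of a Drop-implementing type, so read the
        // value out by pointer and never run the guard's own destructor.
        let guard = ManuallyDrop::new(guard);
        unsafe {
            let value = ptr::read(&guard.value);
            // Read the closure out as well so that it is still dropped.
            let _ = ptr::read(&guard.dropfn);
            value
        }
    }
}

impl<T, F: FnMut(&mut T)> Deref for ScopeGuard<T, F> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T, F: FnMut(&mut T)> DerefMut for ScopeGuard<T, F> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T, F: FnMut(&mut T)> Drop for ScopeGuard<T, F> {
    fn drop(&mut self) {
        (self.dropfn)(&mut self.value);
    }
}

fn main() {
    // Armed: the cleanup closure runs when the guard is dropped.
    let armed = guard(String::from("state"), |s| s.clear());
    drop(armed); // `s.clear()` runs here

    // Disarmed: `into_inner` hands the value back untouched.
    let disarmed = guard(String::from("state"), |s| s.clear());
    assert_eq!(ScopeGuard::into_inner(disarmed), "state");
}

The `Deref`/`DerefMut` impls are what let the new `clone_from` call methods such as `drop_elements` directly on the guard before forgetting it.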
