Skip to content

Commit 50a9e7b

Browse files
committed
Add test for leaking allocator during cloning
1 parent 91390f3 commit 50a9e7b

File tree

2 files changed

+120
-115
lines changed

2 files changed

+120
-115
lines changed

src/map.rs

Lines changed: 120 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -6658,7 +6658,7 @@ mod test_map {
66586658
use allocator_api2::alloc::{AllocError, Allocator, Global};
66596659
use core::alloc::Layout;
66606660
use core::ptr::NonNull;
6661-
use core::sync::atomic::{AtomicBool, Ordering};
6661+
use core::sync::atomic::{AtomicI8, Ordering};
66626662
use rand::{rngs::SmallRng, Rng, SeedableRng};
66636663
use std::borrow::ToOwned;
66646664
use std::cell::RefCell;
@@ -8510,47 +8510,44 @@ mod test_map {
85108510
let _map2 = map1.clone();
85118511
}
85128512

8513-
#[test]
8514-
fn test_hashmap_into_iter_bug() {
8515-
let dropped: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
8513+
struct MyAllocInner {
8514+
drop_count: Arc<AtomicI8>,
8515+
}
85168516

8517-
{
8518-
struct MyAllocInner {
8519-
drop_flag: Arc<AtomicBool>,
8520-
}
8517+
#[derive(Clone)]
8518+
struct MyAlloc {
8519+
_inner: Arc<MyAllocInner>,
8520+
}
85218521

8522-
#[derive(Clone)]
8523-
struct MyAlloc {
8524-
_inner: Arc<MyAllocInner>,
8525-
}
8522+
impl Drop for MyAllocInner {
8523+
fn drop(&mut self) {
8524+
println!("MyAlloc freed.");
8525+
self.drop_count.fetch_sub(1, Ordering::SeqCst);
8526+
}
8527+
}
85268528

8527-
impl Drop for MyAllocInner {
8528-
fn drop(&mut self) {
8529-
println!("MyAlloc freed.");
8530-
self.drop_flag.store(true, Ordering::SeqCst);
8531-
}
8532-
}
8529+
unsafe impl Allocator for MyAlloc {
8530+
fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
8531+
let g = Global;
8532+
g.allocate(layout)
8533+
}
85338534

8534-
unsafe impl Allocator for MyAlloc {
8535-
fn allocate(
8536-
&self,
8537-
layout: Layout,
8538-
) -> std::result::Result<NonNull<[u8]>, AllocError> {
8539-
let g = Global;
8540-
g.allocate(layout)
8541-
}
8535+
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
8536+
let g = Global;
8537+
g.deallocate(ptr, layout)
8538+
}
8539+
}
85428540

8543-
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
8544-
let g = Global;
8545-
g.deallocate(ptr, layout)
8546-
}
8547-
}
8541+
#[test]
8542+
fn test_hashmap_into_iter_bug() {
8543+
let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(1));
85488544

8545+
{
85498546
let mut map = crate::HashMap::with_capacity_in(
85508547
10,
85518548
MyAlloc {
85528549
_inner: Arc::new(MyAllocInner {
8553-
drop_flag: dropped.clone(),
8550+
drop_count: dropped.clone(),
85548551
}),
85558552
},
85568553
);
@@ -8563,6 +8560,96 @@ mod test_map {
85638560
}
85648561
}
85658562

8566-
assert!(dropped.load(Ordering::SeqCst));
8563+
// All allocator clones should already be dropped.
8564+
assert_eq!(dropped.load(Ordering::SeqCst), 0);
8565+
}
8566+
8567+
#[test]
8568+
#[should_panic = "panic in clone"]
8569+
fn test_clone_memory_leaks_and_double_drop() {
8570+
let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
8571+
8572+
{
8573+
let mut map = crate::HashMap::with_capacity_in(
8574+
10,
8575+
MyAlloc {
8576+
_inner: Arc::new(MyAllocInner {
8577+
drop_count: dropped.clone(),
8578+
}),
8579+
},
8580+
);
8581+
8582+
const DISARMED: bool = false;
8583+
const ARMED: bool = true;
8584+
8585+
struct CheckedCloneDrop {
8586+
panic_in_clone: bool,
8587+
dropped: bool,
8588+
need_drop: Vec<i32>,
8589+
}
8590+
8591+
impl Clone for CheckedCloneDrop {
8592+
fn clone(&self) -> Self {
8593+
if self.panic_in_clone {
8594+
panic!("panic in clone")
8595+
}
8596+
Self {
8597+
panic_in_clone: self.panic_in_clone,
8598+
dropped: self.dropped,
8599+
need_drop: self.need_drop.clone(),
8600+
}
8601+
}
8602+
}
8603+
8604+
impl Drop for CheckedCloneDrop {
8605+
fn drop(&mut self) {
8606+
if self.dropped {
8607+
panic!("double drop");
8608+
}
8609+
self.dropped = true;
8610+
}
8611+
}
8612+
8613+
let armed_flags = [
8614+
DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
8615+
];
8616+
8617+
// Hash and Key must be equal to each other for controlling the elements placement
8618+
// so that we can be sure that we first clone elements that don't panic during cloning.
8619+
for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
8620+
let idx = idx as u64;
8621+
map.table.insert(
8622+
idx,
8623+
(
8624+
idx,
8625+
CheckedCloneDrop {
8626+
panic_in_clone,
8627+
dropped: false,
8628+
need_drop: vec![0, 1, 2, 3],
8629+
},
8630+
),
8631+
|(k, _)| *k,
8632+
);
8633+
}
8634+
8635+
let mut count = 0;
8636+
// Let's check that all elements are located as we wanted
8637+
//
8638+
// SAFETY: We know for sure that `RawTable` will outlive
8639+
// the returned `RawIter` iterator.
8640+
for ((key, value), panic_in_clone) in map.iter().zip(armed_flags) {
8641+
assert_eq!(*key, count);
8642+
assert_eq!(value.panic_in_clone, panic_in_clone);
8643+
count += 1;
8644+
}
8645+
assert_eq!(map.len(), 7);
8646+
assert_eq!(count, 7);
8647+
8648+
// Clone should normally clone a few elements, and then (when the
8649+
// clone function panics), deallocate both its own memory, memory
8650+
// of `dropped: Arc<AtomicI8>` and the memory of already cloned
8651+
// elements (Vec<i32> memory inside CheckedCloneDrop).
8652+
let _table2 = map.clone();
8653+
}
85678654
}
85688655
}

src/raw/mod.rs

Lines changed: 0 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -3393,88 +3393,6 @@ mod test_map {
33933393
}
33943394
}
33953395

3396-
#[test]
3397-
#[should_panic = "panic in clone"]
3398-
fn test_clone_memory_leaks_and_double_drop() {
3399-
use ::alloc::vec::Vec;
3400-
3401-
const DISARMED: bool = false;
3402-
const ARMED: bool = true;
3403-
3404-
struct CheckedCloneDrop {
3405-
panic_in_clone: bool,
3406-
dropped: bool,
3407-
need_drop: Vec<i32>,
3408-
}
3409-
3410-
impl Clone for CheckedCloneDrop {
3411-
fn clone(&self) -> Self {
3412-
if self.panic_in_clone {
3413-
panic!("panic in clone")
3414-
}
3415-
Self {
3416-
panic_in_clone: self.panic_in_clone,
3417-
dropped: self.dropped,
3418-
need_drop: self.need_drop.clone(),
3419-
}
3420-
}
3421-
}
3422-
3423-
impl Drop for CheckedCloneDrop {
3424-
fn drop(&mut self) {
3425-
if self.dropped {
3426-
panic!("double drop");
3427-
}
3428-
self.dropped = true;
3429-
}
3430-
}
3431-
3432-
let mut table: RawTable<(u64, CheckedCloneDrop)> = RawTable::with_capacity(7);
3433-
3434-
let armed_flags = [
3435-
DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
3436-
];
3437-
3438-
// Hash and Key must be equal to each other for controlling the elements placement
3439-
// so that we can be sure that we first clone elements that don't panic during cloning.
3440-
for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
3441-
let idx = idx as u64;
3442-
table.insert(
3443-
idx,
3444-
(
3445-
idx,
3446-
CheckedCloneDrop {
3447-
panic_in_clone,
3448-
dropped: false,
3449-
need_drop: vec![0, 1, 2, 3],
3450-
},
3451-
),
3452-
|(k, _)| *k,
3453-
);
3454-
}
3455-
let mut count = 0;
3456-
unsafe {
3457-
// Let's check that all elements are located as we wanted
3458-
//
3459-
// SAFETY: We know for sure that `RawTable` will outlive
3460-
// the returned `RawIter` iterator.
3461-
for (bucket, panic_in_clone) in table.iter().zip(armed_flags) {
3462-
// SAFETY: It's OK, we get bucket from RawIter, so it is safe to read
3463-
let (key, value) = bucket.as_ref();
3464-
assert_eq!(*key, count);
3465-
assert_eq!(value.panic_in_clone, panic_in_clone);
3466-
count += 1;
3467-
}
3468-
}
3469-
assert_eq!(table.len(), 7);
3470-
assert_eq!(count, 7);
3471-
3472-
// Clone should normally clone a few elements, and then (when the clone
3473-
// function panics), deallocate both its own memory and the memory of
3474-
// already cloned elements (Vec<i32> memory inside CheckedCloneDrop).
3475-
let _table2 = table.clone();
3476-
}
3477-
34783396
/// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
34793397
/// AN UNINITIALIZED TABLE DURING THE DROP
34803398
#[test]

0 commit comments

Comments
 (0)