Skip to content

Commit 85e5613

Browse files
committed
Auto merge of #1644 - JCTyblaidd:detect_race_with_alloc, r=RalfJung
More tests, fix issue 1643 and detect races with allocation. Fixes #1643 by disabling race detection for V-Table memory, adds race detection between reads/writes and memory allocation, and adds more tests. ~~There is one unusual result in dealloc_read_race_stack_drop.rs, where the stack variable is read by thread 0 & thread 2 and so reports a race with thread 0; any ideas for the cause of the read on thread 0?~~ Fixed — this was a bug in correctly reporting the vector index at which a read race occurred.
2 parents 2065b52 + c13aabc commit 85e5613

34 files changed

+731
-48
lines changed

rust-version

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
a2e29d67c26bdf8f278c98ee02d6cc77a279ed2e
1+
12813159a985d87a98578e05cc39200e4e8c2102

src/data_race.rs

Lines changed: 106 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@
99
//! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
1010
//! sequences is not needed.
1111
//!
12+
//! The implementation also models races with memory allocation and deallocation via treating allocation and
13+
//! deallocation as a type of write internally for detecting data-races.
14+
//!
1215
//! This does not explore weak memory orders and so can still miss data-races
1316
//! but should not report false-positives
1417
//!
@@ -73,7 +76,7 @@ use rustc_target::abi::Size;
7376
use crate::{
7477
ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
7578
OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
76-
VectorIdx,
79+
VectorIdx, MemoryKind, MiriMemoryKind
7780
};
7881

7982
pub type AllocExtra = VClockAlloc;
@@ -192,6 +195,34 @@ struct AtomicMemoryCellClocks {
192195
sync_vector: VClock,
193196
}
194197

198+
/// Type of write operation: allocating memory
199+
/// non-atomic writes and deallocating memory
200+
/// are all treated as writes for the purpose
201+
/// of the data-race detector.
202+
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
203+
enum WriteType {
204+
/// Allocate memory.
205+
Allocate,
206+
207+
/// Standard unsynchronized write.
208+
Write,
209+
210+
/// Deallocate memory.
211+
/// Note that when memory is deallocated first, later non-atomic accesses
212+
/// will be reported as use-after-free, not as data races.
213+
/// (Same for `Allocate` above.)
214+
Deallocate,
215+
}
216+
impl WriteType {
217+
fn get_descriptor(self) -> &'static str {
218+
match self {
219+
WriteType::Allocate => "Allocate",
220+
WriteType::Write => "Write",
221+
WriteType::Deallocate => "Deallocate",
222+
}
223+
}
224+
}
225+
195226
/// Memory Cell vector clock metadata
196227
/// for data-race detection.
197228
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -204,6 +235,11 @@ struct MemoryCellClocks {
204235
/// that performed the last write operation.
205236
write_index: VectorIdx,
206237

238+
/// The type of operation that the write index represents,
239+
/// either newly allocated memory, a non-atomic write or
240+
/// a deallocation of memory.
241+
write_type: WriteType,
242+
207243
/// The vector-clock of the timestamp of the last read operation
208244
/// performed by a thread since the last write operation occurred.
209245
/// It is reset to zero on each write operation.
@@ -215,20 +251,18 @@ struct MemoryCellClocks {
215251
atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
216252
}
217253

218-
/// Create a default memory cell clocks instance
219-
/// for uninitialized memory.
220-
impl Default for MemoryCellClocks {
221-
fn default() -> Self {
254+
impl MemoryCellClocks {
255+
/// Create a new set of clocks representing memory allocated
256+
/// at a given vector timestamp and index.
257+
fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
222258
MemoryCellClocks {
223259
read: VClock::default(),
224-
write: 0,
225-
write_index: VectorIdx::MAX_INDEX,
260+
write: alloc,
261+
write_index: alloc_index,
262+
write_type: WriteType::Allocate,
226263
atomic_ops: None,
227264
}
228265
}
229-
}
230-
231-
impl MemoryCellClocks {
232266

233267
/// Load the internal atomic memory cells if they exist.
234268
#[inline]
@@ -382,6 +416,7 @@ impl MemoryCellClocks {
382416
&mut self,
383417
clocks: &ThreadClockSet,
384418
index: VectorIdx,
419+
write_type: WriteType,
385420
) -> Result<(), DataRace> {
386421
log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
387422
if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
@@ -393,6 +428,7 @@ impl MemoryCellClocks {
393428
if race_free {
394429
self.write = clocks.clock[index];
395430
self.write_index = index;
431+
self.write_type = write_type;
396432
self.read.set_zero_vector();
397433
Ok(())
398434
} else {
@@ -638,6 +674,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
638674
Ok(())
639675
}
640676
}
677+
678+
fn reset_vector_clocks(
679+
&mut self,
680+
ptr: Pointer<Tag>,
681+
size: Size
682+
) -> InterpResult<'tcx> {
683+
let this = self.eval_context_mut();
684+
if let Some(data_race) = &mut this.memory.extra.data_race {
685+
if data_race.multi_threaded.get() {
686+
let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
687+
alloc_meta.reset_clocks(ptr.offset, size);
688+
}
689+
}
690+
Ok(())
691+
}
641692
}
642693

643694
/// Vector clock metadata for a logical memory allocation.
@@ -646,22 +697,50 @@ pub struct VClockAlloc {
646697
/// Assigning each byte a MemoryCellClocks.
647698
alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
648699

649-
// Pointer to global state.
700+
/// Pointer to global state.
650701
global: MemoryExtra,
651702
}
652703

653704
impl VClockAlloc {
654-
/// Create a new data-race allocation detector.
655-
pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
705+
/// Create a new data-race detector for newly allocated memory.
706+
pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
707+
let (alloc_timestamp, alloc_index) = match kind {
708+
// User allocated and stack memory should track allocation.
709+
MemoryKind::Machine(
710+
MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
711+
) | MemoryKind::Stack => {
712+
let (alloc_index, clocks) = global.current_thread_state();
713+
let alloc_timestamp = clocks.clock[alloc_index];
714+
(alloc_timestamp, alloc_index)
715+
}
716+
// Other global memory should trace races but be allocated at the 0 timestamp.
717+
MemoryKind::Machine(
718+
MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
719+
MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
720+
) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
721+
(0, VectorIdx::MAX_INDEX)
722+
}
723+
};
656724
VClockAlloc {
657725
global: Rc::clone(global),
658-
alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
726+
alloc_ranges: RefCell::new(RangeMap::new(
727+
len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
728+
)),
729+
}
730+
}
731+
732+
fn reset_clocks(&mut self, offset: Size, len: Size) {
733+
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
734+
for (_, range) in alloc_ranges.iter_mut(offset, len) {
735+
// Reset the portion of the range
736+
*range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
659737
}
660738
}
661739

662740
// Find an index, if one exists where the value
663741
// in `l` is greater than the value in `r`.
664742
fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
743+
log::trace!("Find index where not {:?} <= {:?}", l, r);
665744
let l_slice = l.as_slice();
666745
let r_slice = r.as_slice();
667746
l_slice
@@ -681,7 +760,7 @@ impl VClockAlloc {
681760
.enumerate()
682761
.find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
683762
.expect("Invalid VClock Invariant");
684-
Some(idx)
763+
Some(idx + r_slice.len())
685764
} else {
686765
None
687766
}
@@ -712,18 +791,18 @@ impl VClockAlloc {
712791
// Convert the write action into the vector clock it
713792
// represents for diagnostic purposes.
714793
write_clock = VClock::new_with_index(range.write_index, range.write);
715-
("WRITE", range.write_index, &write_clock)
794+
(range.write_type.get_descriptor(), range.write_index, &write_clock)
716795
} else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
717-
("READ", idx, &range.read)
796+
("Read", idx, &range.read)
718797
} else if !is_atomic {
719798
if let Some(atomic) = range.atomic() {
720799
if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
721800
{
722-
("ATOMIC_STORE", idx, &atomic.write_vector)
801+
("Atomic Store", idx, &atomic.write_vector)
723802
} else if let Some(idx) =
724803
Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
725804
{
726-
("ATOMIC_LOAD", idx, &atomic.read_vector)
805+
("Atomic Load", idx, &atomic.read_vector)
727806
} else {
728807
unreachable!(
729808
"Failed to report data-race for non-atomic operation: no race found"
@@ -774,7 +853,7 @@ impl VClockAlloc {
774853
return Self::report_data_race(
775854
&self.global,
776855
range,
777-
"READ",
856+
"Read",
778857
false,
779858
pointer,
780859
len,
@@ -792,17 +871,17 @@ impl VClockAlloc {
792871
&mut self,
793872
pointer: Pointer<Tag>,
794873
len: Size,
795-
action: &str,
874+
write_type: WriteType,
796875
) -> InterpResult<'tcx> {
797876
if self.global.multi_threaded.get() {
798877
let (index, clocks) = self.global.current_thread_state();
799878
for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
800-
if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
879+
if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
801880
// Report data-race
802881
return Self::report_data_race(
803882
&self.global,
804883
range,
805-
action,
884+
write_type.get_descriptor(),
806885
false,
807886
pointer,
808887
len,
@@ -820,15 +899,15 @@ impl VClockAlloc {
820899
/// being created or if it is temporarily disabled during a racy read or write
821900
/// operation
822901
pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
823-
self.unique_access(pointer, len, "Write")
902+
self.unique_access(pointer, len, WriteType::Write)
824903
}
825904

826905
/// Detect data-races for an unsynchronized deallocate operation, will not perform
827906
/// data-race threads if `multi-threaded` is false, either due to no threads
828907
/// being created or if it is temporarily disabled during a racy read or write
829908
/// operation
830909
pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
831-
self.unique_access(pointer, len, "Deallocate")
910+
self.unique_access(pointer, len, WriteType::Deallocate)
832911
}
833912
}
834913

@@ -1134,6 +1213,8 @@ impl GlobalState {
11341213
vector_info.push(thread)
11351214
};
11361215

1216+
log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1217+
11371218
// Mark the chosen vector index as in use by the thread.
11381219
thread_info[thread].vector_index = Some(created_index);
11391220

src/machine.rs

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -478,7 +478,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
478478
(None, Tag::Untagged)
479479
};
480480
let race_alloc = if let Some(data_race) = &memory_extra.data_race {
481-
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size))
481+
Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size, kind))
482482
} else {
483483
None
484484
};
@@ -510,6 +510,18 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
510510
Ok(())
511511
}
512512

513+
514+
fn after_static_mem_initialized(
515+
ecx: &mut InterpCx<'mir, 'tcx, Self>,
516+
ptr: Pointer<Self::PointerTag>,
517+
size: Size,
518+
) -> InterpResult<'tcx> {
519+
if ecx.memory.extra.data_race.is_some() {
520+
ecx.reset_vector_clocks(ptr, size)?;
521+
}
522+
Ok(())
523+
}
524+
513525
#[inline(always)]
514526
fn tag_global_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
515527
if let Some(stacked_borrows) = &memory_extra.stacked_borrows {
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
// ignore-windows: Concurrency on Windows is not supported yet.
2+
3+
use std::thread::spawn;
4+
use std::ptr::null_mut;
5+
use std::sync::atomic::{Ordering, AtomicPtr};
6+
use std::mem::MaybeUninit;
7+
8+
#[derive(Copy, Clone)]
9+
struct EvilSend<T>(pub T);
10+
11+
unsafe impl<T> Send for EvilSend<T> {}
12+
unsafe impl<T> Sync for EvilSend<T> {}
13+
14+
pub fn main() {
15+
// Shared atomic pointer
16+
let pointer = AtomicPtr::new(null_mut::<MaybeUninit<usize>>());
17+
let ptr = EvilSend(&pointer as *const AtomicPtr<MaybeUninit<usize>>);
18+
19+
// Note: this is scheduler-dependent
20+
// the operations need to occur in
21+
// order, otherwise the allocation is
22+
// not visible to the other-thread to
23+
// detect the race:
24+
// 1. alloc
25+
// 2. write
26+
unsafe {
27+
let j1 = spawn(move || {
28+
// Concurrent allocate the memory.
29+
// Uses relaxed semantics to not generate
30+
// a release sequence.
31+
let pointer = &*ptr.0;
32+
pointer.store(Box::into_raw(Box::new(MaybeUninit::uninit())), Ordering::Relaxed);
33+
});
34+
35+
let j2 = spawn(move || {
36+
let pointer = &*ptr.0;
37+
38+
// Note: could also error due to reading uninitialized memory, but the data-race detector triggers first.
39+
*pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1)
40+
});
41+
42+
j1.join().unwrap();
43+
j2.join().unwrap();
44+
45+
// Clean up memory, will never be executed
46+
drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
47+
}
48+
}

0 commit comments

Comments
 (0)