//! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
//! sequences is not needed.
//!
+//! The implementation also models races with memory allocation and deallocation via treating allocation and
+//! deallocation as a type of write internally for detecting data-races.
+//!
//! This does not explore weak memory orders and so can still miss data-races
//! but should not report false-positives
//!
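As an illustration of what "treating allocation as a type of write" buys, the hypothetical snippet below (not part of this patch or its test suite) publishes a freshly allocated pointer through a `Relaxed` atomic, so nothing orders the allocation before the other thread's read and the detector can flag a race between the allocation and the read.

// Hypothetical example, not part of this patch: a race between an
// allocation (treated as a write) and an unsynchronized read.
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::thread;

static PTR: AtomicPtr<u32> = AtomicPtr::new(null_mut());

fn main() {
    let alloc = thread::spawn(|| {
        // The allocation itself is recorded as a write at this thread's clock.
        let p = Box::into_raw(Box::new(42u32));
        // Relaxed store publishes the pointer without creating a happens-before edge.
        PTR.store(p, Ordering::Relaxed);
    });
    let read = thread::spawn(|| {
        let p = PTR.load(Ordering::Relaxed);
        if !p.is_null() {
            // Not ordered after the allocation, so the detector can report a
            // race between this read and the allocation.
            let _val = unsafe { *p };
        }
    });
    alloc.join().unwrap();
    read.join().unwrap();
    // The Box is intentionally leaked; this sketch is a program the detector rejects anyway.
}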
@@ -73,7 +76,7 @@ use rustc_target::abi::Size;
use crate::{
    ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
    OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
-    VectorIdx,
+    VectorIdx, MemoryKind, MiriMemoryKind
};

pub type AllocExtra = VClockAlloc;
@@ -192,6 +195,34 @@ struct AtomicMemoryCellClocks {
    sync_vector: VClock,
}

+/// Type of write operation: allocating memory
+/// non-atomic writes and deallocating memory
+/// are all treated as writes for the purpose
+/// of the data-race detector.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteType {
+    /// Allocate memory.
+    Allocate,
+
+    /// Standard unsynchronized write.
+    Write,
+
+    /// Deallocate memory.
+    /// Note that when memory is deallocated first, later non-atomic accesses
+    /// will be reported as use-after-free, not as data races.
+    /// (Same for `Allocate` above.)
+    Deallocate,
+}
+impl WriteType {
+    fn get_descriptor(self) -> &'static str {
+        match self {
+            WriteType::Allocate => "Allocate",
+            WriteType::Write => "Write",
+            WriteType::Deallocate => "Deallocate",
+        }
+    }
+}
+
/// Memory Cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
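The `Deallocate` note in the enum above is about interpreter ordering: if the deallocation happens to execute before the conflicting access, the access no longer targets live memory and is rejected as a use-after-free instead. A hypothetical sketch (not from this patch or its tests) of a deallocation racing with a read:

// Hypothetical example, not part of this patch: a deallocation (treated as a
// write) racing with an unsynchronized read from another thread.
use std::thread;

// Illustration-only wrapper so the raw pointer can be captured by both threads.
#[derive(Clone, Copy)]
struct SendPtr(*mut u32);
unsafe impl Send for SendPtr {}

fn main() {
    let p = SendPtr(Box::into_raw(Box::new(0u32)));
    let reader = thread::spawn(move || {
        // Unsynchronized read of the heap allocation.
        let _val = unsafe { *p.0 };
    });
    let freer = thread::spawn(move || {
        // Races with the read above: depending on which side runs first this is
        // reported either as a deallocate/read data race or as a use-after-free
        // in the reader.
        unsafe { drop(Box::from_raw(p.0)) };
    });
    reader.join().unwrap();
    freer.join().unwrap();
}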
@@ -204,6 +235,11 @@ struct MemoryCellClocks {
    /// that performed the last write operation.
    write_index: VectorIdx,

+    /// The type of operation that the write index represents,
+    /// either newly allocated memory, a non-atomic write or
+    /// a deallocation of memory.
+    write_type: WriteType,
+
    /// The vector-clock of the timestamp of the last read operation
    /// performed by a thread since the last write operation occurred.
    /// It is reset to zero on each write operation.
@@ -215,20 +251,18 @@ struct MemoryCellClocks {
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

-/// Create a default memory cell clocks instance
-/// for uninitialized memory.
-impl Default for MemoryCellClocks {
-    fn default() -> Self {
+impl MemoryCellClocks {
+    /// Create a new set of clocks representing memory allocated
+    /// at a given vector timestamp and index.
+    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        MemoryCellClocks {
            read: VClock::default(),
-            write: 0,
-            write_index: VectorIdx::MAX_INDEX,
+            write: alloc,
+            write_index: alloc_index,
+            write_type: WriteType::Allocate,
            atomic_ops: None,
        }
    }
-}
-
-impl MemoryCellClocks {

    /// Load the internal atomic memory cells if they exist.
    #[inline]
@@ -382,6 +416,7 @@ impl MemoryCellClocks {
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
+        write_type: WriteType,
    ) -> Result<(), DataRace> {
        log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
@@ -393,6 +428,7 @@ impl MemoryCellClocks {
        if race_free {
            self.write = clocks.clock[index];
            self.write_index = index;
+            self.write_type = write_type;
            self.read.set_zero_vector();
            Ok(())
        } else {
@@ -638,6 +674,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
            Ok(())
        }
    }
+
+    fn reset_vector_clocks(
+        &mut self,
+        ptr: Pointer<Tag>,
+        size: Size
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        if let Some(data_race) = &mut this.memory.extra.data_race {
+            if data_race.multi_threaded.get() {
+                let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+                alloc_meta.reset_clocks(ptr.offset, size);
+            }
+        }
+        Ok(())
+    }
}

/// Vector clock metadata for a logical memory allocation.
@@ -646,22 +697,50 @@ pub struct VClockAlloc {
    /// Assigning each byte a MemoryCellClocks.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

-    // Pointer to global state.
+    /// Pointer to global state.
    global: MemoryExtra,
}

impl VClockAlloc {
-    /// Create a new data-race allocation detector.
-    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
+    /// Create a new data-race detector for newly allocated memory.
+    pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
+        let (alloc_timestamp, alloc_index) = match kind {
+            // User allocated and stack memory should track allocation.
+            MemoryKind::Machine(
+                MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
+            ) | MemoryKind::Stack => {
+                let (alloc_index, clocks) = global.current_thread_state();
+                let alloc_timestamp = clocks.clock[alloc_index];
+                (alloc_timestamp, alloc_index)
+            }
+            // Other global memory should trace races but be allocated at the 0 timestamp.
+            MemoryKind::Machine(
+                MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
+                MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
+            ) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
+                (0, VectorIdx::MAX_INDEX)
+            }
+        };
        VClockAlloc {
            global: Rc::clone(global),
-            alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
+            alloc_ranges: RefCell::new(RangeMap::new(
+                len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+            )),
+        }
+    }
+
+    fn reset_clocks(&mut self, offset: Size, len: Size) {
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (_, range) in alloc_ranges.iter_mut(offset, len) {
+            // Reset the portion of the range
+            *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
        }
    }

    // Find an index, if one exists where the value
    // in `l` is greater than the value in `r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
+        log::trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
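A hypothetical sketch (not from this patch or its test suite) of the first case handled by `new_allocation` in the hunk above: heap memory is stamped with the allocating thread's clock, so a read that is ordered after the allocation, here via a channel's send/receive synchronization, is race-free, unlike the unsynchronized variants shown earlier.

// Hypothetical example, not part of this patch: the allocation "write" is
// ordered before the read by the channel synchronization, so no race is reported.
use std::sync::mpsc::channel;
use std::thread;

// Illustration-only wrapper so the raw pointer can be sent across threads.
struct SendPtr(*mut u32);
unsafe impl Send for SendPtr {}

fn main() {
    let (tx, rx) = channel();
    let producer = thread::spawn(move || {
        // Allocation recorded at this thread's clock.
        let p = Box::into_raw(Box::new(42u32));
        // `send` synchronizes with the matching `recv` below.
        tx.send(SendPtr(p)).unwrap();
    });
    let SendPtr(p) = rx.recv().unwrap();
    // Happens-after the allocation, so race-free.
    let v = unsafe { *p };
    unsafe { drop(Box::from_raw(p)) };
    producer.join().unwrap();
    assert_eq!(v, 42);
}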
@@ -681,7 +760,7 @@ impl VClockAlloc {
                .enumerate()
                .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
                .expect("Invalid VClock Invariant");
-            Some(idx)
+            Some(idx + r_slice.len())
        } else {
            None
        }
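The `Some(idx + r_slice.len())` change above fixes an off-by-offset: when `l` is longer than `r`, the trailing elements of `l` are compared against an implicit zero, and `enumerate` restarts at 0 over that tail, so the tail position has to be shifted by `r_slice.len()` to recover the real vector index. A hypothetical standalone illustration of the arithmetic, using plain slices rather than the `VClock` type:

// Hypothetical illustration of the index fix, not the actual VClock code.
fn find_gt_index(l: &[u32], r: &[u32]) -> Option<usize> {
    // Elements present in both slices.
    if let Some(idx) = l.iter().zip(r.iter()).position(|(&a, &b)| a > b) {
        return Some(idx);
    }
    // Elements of `l` past the end of `r` are compared against an implicit 0,
    // so any non-zero entry is "greater"; its position in the tail must be
    // shifted by `r.len()` to become an index into `l`.
    l[r.len().min(l.len())..]
        .iter()
        .position(|&a| a != 0)
        .map(|idx| idx + r.len())
}

fn main() {
    // The non-zero tail element sits at l[3]; without the `+ r.len()` shift
    // the function would wrongly report index 1.
    assert_eq!(find_gt_index(&[0, 0, 0, 5], &[0, 0]), Some(3));
    assert_eq!(find_gt_index(&[1, 2], &[1, 2]), None);
}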
@@ -712,18 +791,18 @@ impl VClockAlloc {
            // Convert the write action into the vector clock it
            // represents for diagnostic purposes.
            write_clock = VClock::new_with_index(range.write_index, range.write);
-            ("WRITE", range.write_index, &write_clock)
+            (range.write_type.get_descriptor(), range.write_index, &write_clock)
        } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
-            ("READ", idx, &range.read)
+            ("Read", idx, &range.read)
        } else if !is_atomic {
            if let Some(atomic) = range.atomic() {
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                {
-                    ("ATOMIC_STORE", idx, &atomic.write_vector)
+                    ("Atomic Store", idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
                {
-                    ("ATOMIC_LOAD", idx, &atomic.read_vector)
+                    ("Atomic Load", idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for non-atomic operation: no race found"
@@ -774,7 +853,7 @@ impl VClockAlloc {
                return Self::report_data_race(
                    &self.global,
                    range,
-                    "READ",
+                    "Read",
                    false,
                    pointer,
                    len,
@@ -792,17 +871,17 @@ impl VClockAlloc {
        &mut self,
        pointer: Pointer<Tag>,
        len: Size,
-        action: &str,
+        write_type: WriteType,
    ) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let (index, clocks) = self.global.current_thread_state();
            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
-                if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
+                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                    // Report data-race
                    return Self::report_data_race(
                        &self.global,
                        range,
-                        action,
+                        write_type.get_descriptor(),
                        false,
                        pointer,
                        len,
@@ -820,15 +899,15 @@ impl VClockAlloc {
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation
    pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Write")
+        self.unique_access(pointer, len, WriteType::Write)
    }

    /// Detect data-races for an unsynchronized deallocate operation, will not perform
    /// data-race threads if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation
    pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Deallocate")
+        self.unique_access(pointer, len, WriteType::Deallocate)
    }
}
@@ -1134,6 +1213,8 @@ impl GlobalState {
            vector_info.push(thread)
        };

+        log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
+
        // Mark the chosen vector index as in use by the thread.
        thread_info[thread].vector_index = Some(created_index);