@@ -62,35 +62,35 @@ use super::weak_memory::EvalContextExt as _;

 pub type AllocExtra = VClockAlloc;

-/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
+/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive).
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum AtomicRwOp {
+pub enum AtomicRwOrd {
     Relaxed,
     Acquire,
     Release,
     AcqRel,
     SeqCst,
 }

-/// Valid atomic read operations, subset of atomic::Ordering.
+/// Valid atomic read orderings, subset of atomic::Ordering.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum AtomicReadOp {
+pub enum AtomicReadOrd {
     Relaxed,
     Acquire,
     SeqCst,
 }

-/// Valid atomic write operations, subset of atomic::Ordering.
+/// Valid atomic write orderings, subset of atomic::Ordering.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum AtomicWriteOp {
+pub enum AtomicWriteOrd {
     Relaxed,
     Release,
     SeqCst,
 }

-/// Valid atomic fence operations, subset of atomic::Ordering.
+/// Valid atomic fence orderings, subset of atomic::Ordering.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum AtomicFenceOp {
+pub enum AtomicFenceOrd {
     Acquire,
     Release,
     AcqRel,
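Note: these enums mirror the ordering subsets that std::sync::atomic enforces at runtime — a load given Release or AcqRel panics, as does a store given Acquire or AcqRel, while read-modify-write operations accept all five orderings. A minimal std-level illustration of the invariants the renamed types encode statically (plain user code, not Miri internals):

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(0);
    // Stores accept Relaxed, Release, SeqCst (cf. AtomicWriteOrd).
    x.store(1, Ordering::Release);
    // Loads accept Relaxed, Acquire, SeqCst (cf. AtomicReadOrd).
    let _v = x.load(Ordering::Acquire);
    // Read-modify-writes accept all five orderings (cf. AtomicRwOrd).
    x.fetch_add(1, Ordering::AcqRel);
    // `x.load(Ordering::Release)` would panic: not a valid read ordering.
}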
@@ -486,7 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         op: &OpTy<'tcx, Tag>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
         let value_place = this.deref_operand_and_offset(op, offset, layout)?;
@@ -500,7 +500,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         offset: u64,
         value: impl Into<ScalarMaybeUninit<Tag>>,
         layout: TyAndLayout<'tcx>,
-        atomic: AtomicWriteOp,
+        atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let value_place = this.deref_operand_and_offset(op, offset, layout)?;
@@ -511,7 +511,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn read_scalar_atomic(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
         // This will read from the last store in the modification order of this location. In case
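The comment on read_scalar_atomic refers to Miri's weak-memory emulation: a C++11 atomic load need not observe the globally newest store, only one that the modification order and happens-before still allow. A toy, self-contained sketch of that idea (illustrative only; the real logic lives in the weak_memory module imported at the top of this diff):

// Toy model of a per-location store buffer: the modification order is a
// list of stores, and a load may return any store that has not been
// ruled out by synchronization (modeled here as a `visible_from` index).
struct StoreBuffer {
    stores: Vec<u64>, // values in modification order, oldest first
}

impl StoreBuffer {
    fn load(&self, visible_from: usize) -> &[u64] {
        // Every store from `visible_from` onward is a legal result.
        &self.stores[visible_from.min(self.stores.len() - 1)..]
    }
}

fn main() {
    let buf = StoreBuffer { stores: vec![1, 2, 3] };
    assert_eq!(buf.load(0), &[1, 2, 3]); // unsynchronized: any of them
    assert_eq!(buf.load(2), &[3]); // fully synchronized: newest only
}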
@@ -531,7 +531,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         &mut self,
         val: ScalarMaybeUninit<Tag>,
         dest: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicWriteOp,
+        atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         this.validate_overlapping_atomic(dest)?;
@@ -552,7 +552,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         rhs: &ImmTy<'tcx, Tag>,
         op: mir::BinOp,
         neg: bool,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();

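Here op is a MIR binary operation and neg asks for the result to be negated; the pairing op = BitAnd with neg = true expresses a nand (the caller is outside this hunk, but nand is the canonical use of such a flag — std exposes it as fetch_nand). A quick std-level check of that identity:

use std::sync::atomic::{AtomicU8, Ordering};

fn main() {
    let x = AtomicU8::new(0b1100);
    // fetch_nand stores !(old & val): a BitAnd whose result is negated,
    // i.e. the `neg = true` case of a read-modify-write binop.
    let old = x.fetch_nand(0b1010, Ordering::AcqRel);
    assert_eq!(old, 0b1100);
    assert_eq!(x.load(Ordering::Relaxed), !(0b1100u8 & 0b1010));
}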
@@ -581,7 +581,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         &mut self,
         place: &MPlaceTy<'tcx, Tag>,
         new: ScalarMaybeUninit<Tag>,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_mut();

@@ -602,7 +602,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         place: &MPlaceTy<'tcx, Tag>,
         rhs: ImmTy<'tcx, Tag>,
         min: bool,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
         let this = self.eval_context_mut();

@@ -642,8 +642,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         place: &MPlaceTy<'tcx, Tag>,
         expect_old: &ImmTy<'tcx, Tag>,
         new: ScalarMaybeUninit<Tag>,
-        success: AtomicRwOp,
-        fail: AtomicReadOp,
+        success: AtomicRwOrd,
+        fail: AtomicReadOrd,
         can_fail_spuriously: bool,
     ) -> InterpResult<'tcx, Immediate<Tag>> {
         use rand::Rng as _;
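The split success/fail orderings and the can_fail_spuriously flag line up with compare_exchange and compare_exchange_weak in std: a successful exchange is a read-modify-write (hence AtomicRwOrd), while a failed one only performs a load (hence AtomicReadOrd). For example:

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(5);
    // Success is a read-modify-write (AcqRel here); failure is only a
    // load, so it takes a read ordering (Acquire here).
    let r = x.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire);
    assert_eq!(r, Ok(5));
    // The `weak` variant may fail spuriously, so it is normally retried
    // in a loop; this is what `can_fail_spuriously` models.
    let mut cur = x.load(Ordering::Relaxed);
    loop {
        match x.compare_exchange_weak(cur, cur + 1, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => break,
            Err(actual) => cur = actual,
        }
    }
    assert_eq!(x.load(Ordering::Relaxed), 11);
}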
@@ -696,7 +696,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn validate_atomic_load(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         this.validate_overlapping_atomic(place)?;
@@ -705,7 +705,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             atomic,
             "Atomic Load",
             move |memory, clocks, index, atomic| {
-                if atomic == AtomicReadOp::Relaxed {
+                if atomic == AtomicReadOrd::Relaxed {
                     memory.load_relaxed(&mut *clocks, index)
                 } else {
                     memory.load_acquire(&mut *clocks, index)
@@ -719,7 +719,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn validate_atomic_store(
         &mut self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicWriteOp,
+        atomic: AtomicWriteOrd,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         this.validate_overlapping_atomic(place)?;
@@ -728,7 +728,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             atomic,
             "Atomic Store",
             move |memory, clocks, index, atomic| {
-                if atomic == AtomicWriteOp::Relaxed {
+                if atomic == AtomicWriteOrd::Relaxed {
                     memory.store_relaxed(clocks, index)
                 } else {
                     memory.store_release(clocks, index)
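In both closures above, only Relaxed gets the weaker treatment: Acquire and SeqCst loads take the load_acquire path, and Release and SeqCst stores take the store_release path; the extra SeqCst constraints are tracked separately (see the SeqCst fence handling below). A stripped-down sketch of what acquire/release mean for the vector clocks this detector maintains (illustrative only, not Miri's VClock; assumes equal-length clocks):

// Toy vector clocks: entry i is the latest time known for thread i.
#[derive(Debug, PartialEq)]
struct VClock(Vec<u64>);

impl VClock {
    // Pointwise maximum: after an acquire, the reader knows everything
    // the releasing thread knew at its store_release.
    fn join(&mut self, other: &VClock) {
        for (s, o) in self.0.iter_mut().zip(&other.0) {
            *s = (*s).max(*o);
        }
    }
}

fn main() {
    let writer = VClock(vec![3, 0]); // thread 0 at time 3
    let mut reader = VClock(vec![0, 5]); // thread 1 at time 5

    // store_release: the location remembers the writer's clock;
    // load_acquire: the reader joins that clock into its own.
    reader.join(&writer);
    assert_eq!(reader, VClock(vec![3, 5]));
}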
@@ -742,9 +742,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     fn validate_atomic_rmw(
         &mut self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
     ) -> InterpResult<'tcx> {
-        use AtomicRwOp::*;
+        use AtomicRwOrd::*;
         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
         let release = matches!(atomic, Release | AcqRel | SeqCst);
         let this = self.eval_context_mut();
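The two matches! lines split a read-modify-write ordering into its acquire half (applied to the load) and its release half (applied to the store): AcqRel and SeqCst get both, Acquire and Release get one each, Relaxed neither. At the std level, that is why an AcqRel RMW can both publish and be observed:

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;

static FLAG: AtomicBool = AtomicBool::new(false);
static DATA: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let t = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        // The release half of the AcqRel swap publishes the DATA store.
        FLAG.swap(true, Ordering::AcqRel);
    });
    // The acquire load synchronizes with the release half of the swap,
    // so once we see `true`, the relaxed DATA store is visible too.
    while !FLAG.load(Ordering::Acquire) {
        std::hint::spin_loop();
    }
    assert_eq!(DATA.load(Ordering::Relaxed), 42);
    t.join().unwrap();
}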
@@ -764,7 +764,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     }

     /// Update the data-race detector for an atomic fence on the current thread.
-    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
+    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(|index, mut clocks| {
@@ -773,22 +773,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                 // Apply data-race detection for the current fences
                 // this treats AcqRel and SeqCst as the same as an acquire
                 // and release fence applied in the same timestamp.
-                if atomic != AtomicFenceOp::Release {
+                if atomic != AtomicFenceOrd::Release {
                     // Either Acquire | AcqRel | SeqCst
                     clocks.apply_acquire_fence();
                 }
-                if atomic != AtomicFenceOp::Acquire {
+                if atomic != AtomicFenceOrd::Acquire {
                     // Either Release | AcqRel | SeqCst
                     clocks.apply_release_fence();
                 }
-                if atomic == AtomicFenceOp::SeqCst {
+                if atomic == AtomicFenceOrd::SeqCst {
                     data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
                     clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
                     clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
                 }

                 // Increment timestamp in case of release semantics.
-                Ok(atomic != AtomicFenceOp::Acquire)
+                Ok(atomic != AtomicFenceOrd::Acquire)
             })
         } else {
             Ok(())
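As an aside, AtomicFenceOrd has no Relaxed variant for the same reason std::sync::atomic::fence panics on Ordering::Relaxed: a relaxed fence is a no-op. The classic release/acquire fence pairing that apply_release_fence and apply_acquire_fence model looks like this in user code:

use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
use std::thread;

static READY: AtomicBool = AtomicBool::new(false);
static DATA: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let t = thread::spawn(|| {
        DATA.store(7, Ordering::Relaxed);
        // Release fence: orders the DATA store before the READY store.
        fence(Ordering::Release);
        READY.store(true, Ordering::Relaxed);
    });
    while !READY.load(Ordering::Relaxed) {
        std::hint::spin_loop();
    }
    // Acquire fence: synchronizes with the release fence above, making
    // the DATA store visible to this thread.
    fence(Ordering::Acquire);
    assert_eq!(DATA.load(Ordering::Relaxed), 7);
    t.join().unwrap();
}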