
Commit 1435e7c

refactor weak-mem test to list all expected executions
1 parent b508450 commit 1435e7c

3 files changed: +114 -103 lines

tests/pass/0weak_memory_consistency.rs (2 additions & 1 deletion)

@@ -1,6 +1,7 @@
-//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation -Zmiri-provenance-gc=10000
+//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
 // This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
 // override it internally back to the default frequency.
+//@compile-flags: -Zmiri-provenance-gc=10000
 
 // The following tests check whether our weak memory emulation produces
 // any inconsistent execution outcomes
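
The comment about the GC interval now sits between two //@compile-flags: lines. This is harmless assuming the test harness appends the flags from every such directive, so the effective flag set is unchanged; only the layout changes so the comment can explain the -Zmiri-provenance-gc=10000 override it precedes. A minimal sketch of that assumption, using a hypothetical collect_compile_flags helper rather than ui_test's real directive parser:

// Illustrative only: a toy collector that appends the flags from every
// //@compile-flags: line, mirroring the assumption that repeated directives add up.
fn collect_compile_flags(source: &str) -> Vec<String> {
    source
        .lines()
        .filter_map(|line| line.strip_prefix("//@compile-flags:"))
        .flat_map(|flags| flags.split_whitespace().map(|f| f.to_string()))
        .collect()
}

fn main() {
    let header = "\
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
// This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
// override it internally back to the default frequency.
//@compile-flags: -Zmiri-provenance-gc=10000";
    let flags = collect_compile_flags(header);
    // All four flags survive the split; only their placement in the file changed.
    assert_eq!(flags.len(), 4);
    assert!(flags.iter().any(|f| f == "-Zmiri-provenance-gc=10000"));
}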

tests/pass/0weak_memory_consistency_sc.rs (2 additions & 1 deletion)

@@ -1,6 +1,7 @@
-//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation -Zmiri-provenance-gc=10000
+//@compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows -Zmiri-disable-validation
 // This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
 // override it internally back to the default frequency.
+//@compile-flags: -Zmiri-provenance-gc=10000
 
 // The following tests check whether our weak memory emulation produces
 // any inconsistent execution outcomes

tests/pass/weak_memory/weak.rs (110 additions & 101 deletions)

@@ -1,9 +1,11 @@
 //@compile-flags: -Zmiri-ignore-leaks -Zmiri-fixed-schedule
+// This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
+// override it internally back to the default frequency.
+//@compile-flags: -Zmiri-provenance-gc=10000
+
+// Tests showing weak memory behaviours are exhibited, even with a fixed schedule.
+// We run all tests a number of times and then check that we see the desired list of outcomes.
 
-// Tests showing weak memory behaviours are exhibited. All tests
-// return true when the desired behaviour is seen.
-// This is scheduler and pseudo-RNG dependent, so each test is
-// run multiple times until one try returns true.
 // Spurious failure is possible, if you are really unlucky with
 // the RNG and always read the latest value from the store buffer.
 
@@ -31,129 +33,136 @@ fn spin_until(loc: &AtomicUsize, val: usize) -> usize {
     val
 }
 
-fn relaxed(initial_read: bool) -> bool {
-    let x = static_atomic(0);
-    let j1 = spawn(move || {
-        x.store(1, Relaxed);
-        // Preemption is disabled, so the store above will never be the
-        // latest store visible to another thread.
-        x.store(2, Relaxed);
-    });
+/// Check that the function produces the intended set of outcomes.
+#[track_caller]
+fn check_all_outcomes<T: Eq + std::hash::Hash + std::fmt::Display>(
+    expected: impl IntoIterator<Item = T>,
+    generate: impl Fn() -> T,
+) {
+    use std::collections::HashSet;
+
+    let expected: HashSet<T> = HashSet::from_iter(expected);
+    let mut seen = HashSet::new();
+    // Let's give it N times as many tries as we are expecting values.
+    let tries = expected.len() * 8;
+    for _ in 0..tries {
+        let val = generate();
+        assert!(expected.contains(&val), "got an unexpected value: {val}");
+        seen.insert(val);
+    }
+    // Let's see if we saw them all.
+    for val in expected {
+        if !seen.contains(&val) {
+            panic!("did not get value that should be possible: {val}");
+        }
+    }
+}
 
-    let j2 = spawn(move || x.load(Relaxed));
+fn relaxed() {
+    check_all_outcomes([0, 1, 2], || {
+        let x = static_atomic(0);
+        let j1 = spawn(move || {
+            x.store(1, Relaxed);
+            // Preemption is disabled, so the store above will never be the
+            // latest store visible to another thread.
+            x.store(2, Relaxed);
+        });
 
-    j1.join().unwrap();
-    let r2 = j2.join().unwrap();
+        let j2 = spawn(move || x.load(Relaxed));
 
-    // There are three possible values here: 0 (from the initial read), 1 (from the first relaxed
-    // read), and 2 (the last read). The last case is boring and we cover the other two.
-    r2 == if initial_read { 0 } else { 1 }
+        j1.join().unwrap();
+        let r2 = j2.join().unwrap();
+
+        // There are three possible values here: 0 (from the initial read), 1 (from the first relaxed
+        // read), and 2 (the last read).
+        r2
+    });
 }
 
 // https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf Figure 8
-fn seq_cst() -> bool {
-    let x = static_atomic(0);
+fn seq_cst() {
+    check_all_outcomes([1, 3], || {
+        let x = static_atomic(0);
 
-    let j1 = spawn(move || {
-        x.store(1, Relaxed);
-    });
+        let j1 = spawn(move || {
+            x.store(1, Relaxed);
+        });
 
-    let j2 = spawn(move || {
-        x.store(2, SeqCst);
-        x.store(3, SeqCst);
-    });
+        let j2 = spawn(move || {
+            x.store(2, SeqCst);
+            x.store(3, SeqCst);
+        });
 
-    let j3 = spawn(move || x.load(SeqCst));
+        let j3 = spawn(move || x.load(SeqCst));
 
-    j1.join().unwrap();
-    j2.join().unwrap();
-    let r3 = j3.join().unwrap();
+        j1.join().unwrap();
+        j2.join().unwrap();
+        let r3 = j3.join().unwrap();
 
-    // Even though we force t3 to run last, it can still see the value 1.
-    // And it can *never* see the value 2!
-    assert!(r3 == 1 || r3 == 3);
-    r3 == 1
+        // Even though we force t3 to run last, it can still see the value 1.
+        // And it can *never* see the value 2!
+        r3
+    });
 }
 
-fn initialization_write(add_fence: bool) -> bool {
-    let x = static_atomic(11);
+fn initialization_write(add_fence: bool) {
+    check_all_outcomes([11, 22], || {
+        let x = static_atomic(11);
 
-    let wait = static_atomic(0);
+        let wait = static_atomic(0);
 
-    let j1 = spawn(move || {
-        x.store(22, Relaxed);
-        // Relaxed is intentional. We want to test if the thread 2 reads the initialisation write
-        // after a relaxed write
-        wait.store(1, Relaxed);
-    });
+        let j1 = spawn(move || {
+            x.store(22, Relaxed);
+            // Relaxed is intentional. We want to test if the thread 2 reads the initialisation write
+            // after a relaxed write
+            wait.store(1, Relaxed);
+        });
 
-    let j2 = spawn(move || {
-        spin_until(wait, 1);
-        if add_fence {
-            fence(AcqRel);
-        }
-        x.load(Relaxed)
-    });
+        let j2 = spawn(move || {
+            spin_until(wait, 1);
+            if add_fence {
+                fence(AcqRel);
+            }
+            x.load(Relaxed)
+        });
 
-    j1.join().unwrap();
-    let r2 = j2.join().unwrap();
+        j1.join().unwrap();
+        let r2 = j2.join().unwrap();
 
-    r2 == 11
+        r2
+    });
 }
 
-fn faa_replaced_by_load() -> bool {
-    // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
-    #[no_mangle]
-    pub fn rdmw(storing: &AtomicUsize, sync: &AtomicUsize, loading: &AtomicUsize) -> usize {
-        storing.store(1, Relaxed);
-        fence(Release);
-        // sync.fetch_add(0, Relaxed);
-        sync.load(Relaxed);
-        fence(Acquire);
-        loading.load(Relaxed)
-    }
+fn faa_replaced_by_load() {
+    check_all_outcomes([true, false], || {
+        // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
+        pub fn rdmw(storing: &AtomicUsize, sync: &AtomicUsize, loading: &AtomicUsize) -> usize {
+            storing.store(1, Relaxed);
+            fence(Release);
+            // sync.fetch_add(0, Relaxed);
+            sync.load(Relaxed);
+            fence(Acquire);
+            loading.load(Relaxed)
+        }
 
-    let x = static_atomic(0);
-    let y = static_atomic(0);
-    let z = static_atomic(0);
+        let x = static_atomic(0);
+        let y = static_atomic(0);
+        let z = static_atomic(0);
 
-    // Since each thread is so short, we need to make sure that they truely run at the same time
-    // Otherwise t1 will finish before t2 even starts
-    let go = static_atomic(0);
+        let t1 = spawn(move || rdmw(y, x, z));
 
-    let t1 = spawn(move || {
-        spin_until(go, 1);
-        rdmw(y, x, z)
-    });
+        let t2 = spawn(move || rdmw(z, x, y));
 
-    let t2 = spawn(move || {
-        spin_until(go, 1);
-        rdmw(z, x, y)
+        let a = t1.join().unwrap();
+        let b = t2.join().unwrap();
+        (a, b) == (0, 0)
     });
-
-    go.store(1, Relaxed);
-
-    let a = t1.join().unwrap();
-    let b = t2.join().unwrap();
-    (a, b) == (0, 0)
-}
-
-/// Asserts that the function returns true at least once in 100 runs
-#[track_caller]
-fn assert_once(f: fn() -> bool) {
-    assert!(std::iter::repeat_with(|| f()).take(100).any(|x| x));
 }
 
 pub fn main() {
-    assert_once(|| relaxed(false));
-    assert_once(|| relaxed(true));
-    assert_once(seq_cst);
-    assert_once(|| initialization_write(false));
-    assert_once(|| initialization_write(true));
-    assert_once(faa_replaced_by_load);
-
-    // Run seq_cst a few more times since it has an assertion we want to ensure holds always.
-    for _ in 0..50 {
-        seq_cst();
-    }
+    relaxed();
+    seq_cst();
+    initialization_write(false);
+    initialization_write(true);
+    faa_replaced_by_load();
 }
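
For reference, the harness shape can be exercised outside Miri as well. The following is a minimal standalone sketch, not part of the test suite: a deterministic cycling counter stands in for the weak-memory executions the real test observes, purely to illustrate how check_all_outcomes accepts and rejects outcomes.

// Standalone sketch of the check_all_outcomes pattern introduced above,
// driven by a deterministic generator so it runs (and passes) anywhere.
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

#[track_caller]
fn check_all_outcomes<T: Eq + std::hash::Hash + std::fmt::Display>(
    expected: impl IntoIterator<Item = T>,
    generate: impl Fn() -> T,
) {
    let expected: HashSet<T> = HashSet::from_iter(expected);
    let mut seen = HashSet::new();
    // Give the generator several tries per expected value.
    let tries = expected.len() * 8;
    for _ in 0..tries {
        let val = generate();
        // Any value outside the expected set is an immediate failure...
        assert!(expected.contains(&val), "got an unexpected value: {val}");
        seen.insert(val);
    }
    // ...and so is never observing one of the expected values at all.
    for val in expected {
        assert!(seen.contains(&val), "did not get value that should be possible: {val}");
    }
}

fn main() {
    // Stand-in "execution": cycles through 0, 1, 2, so every expected outcome
    // shows up and nothing outside the expected set is ever produced.
    static COUNTER: AtomicUsize = AtomicUsize::new(0);
    check_all_outcomes([0, 1, 2], || COUNTER.fetch_add(1, Relaxed) % 3);
}

In the real test the generator closures spawn threads and return the values actually loaded, so repeated tries sample different store-buffer choices; that is why spurious failure remains possible if the RNG always picks the latest store.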

0 commit comments
