@@ -1,9 +1,11 @@
 //@compile-flags: -Zmiri-ignore-leaks -Zmiri-fixed-schedule
+// This test's runtime explodes if the GC interval is set to 1 (which we do in CI), so we
+// override it internally back to the default frequency.
+//@compile-flags: -Zmiri-provenance-gc=10000
+
+// Tests showing weak memory behaviours are exhibited, even with a fixed schedule.
+// We run all tests a number of times and then check that we see the desired list of outcomes.
 
-// Tests showing weak memory behaviours are exhibited. All tests
-// return true when the desired behaviour is seen.
-// This is scheduler and pseudo-RNG dependent, so each test is
-// run multiple times until one try returns true.
 // Spurious failure is possible, if you are really unlucky with
 // the RNG and always read the latest value from the store buffer.
 
@@ -31,129 +33,136 @@ fn spin_until(loc: &AtomicUsize, val: usize) -> usize {
     val
 }
 
-fn relaxed(initial_read: bool) -> bool {
-    let x = static_atomic(0);
-    let j1 = spawn(move || {
-        x.store(1, Relaxed);
-        // Preemption is disabled, so the store above will never be the
-        // latest store visible to another thread.
-        x.store(2, Relaxed);
-    });
+/// Check that the function produces the intended set of outcomes.
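+/// For example, `check_all_outcomes([0, 1], f)` requires every call of `f` to
+/// return 0 or 1, and requires that both values actually show up across the tries.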
+#[track_caller]
+fn check_all_outcomes<T: Eq + std::hash::Hash + std::fmt::Display>(
+    expected: impl IntoIterator<Item = T>,
+    generate: impl Fn() -> T,
+) {
+    use std::collections::HashSet;
+
+    let expected: HashSet<T> = HashSet::from_iter(expected);
+    let mut seen = HashSet::new();
+    // Let's give it 8 times as many tries as we are expecting values.
+    let tries = expected.len() * 8;
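+    // (Each try re-runs the racy snippet from scratch; with -Zmiri-fixed-schedule,
+    // variation between tries comes only from the seeded RNG driving the store buffers.)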
+    for _ in 0..tries {
+        let val = generate();
+        assert!(expected.contains(&val), "got an unexpected value: {val}");
+        seen.insert(val);
+    }
+    // Let's see if we saw them all.
+    for val in expected {
+        if !seen.contains(&val) {
+            panic!("did not get value that should be possible: {val}");
+        }
+    }
+}
 
-    let j2 = spawn(move || x.load(Relaxed));
+fn relaxed() {
+    check_all_outcomes([0, 1, 2], || {
+        let x = static_atomic(0);
+        let j1 = spawn(move || {
+            x.store(1, Relaxed);
+            // Preemption is disabled, so the store above will never be the
+            // latest store visible to another thread.
+            x.store(2, Relaxed);
+        });
 
-    j1.join().unwrap();
-    let r2 = j2.join().unwrap();
+        let j2 = spawn(move || x.load(Relaxed));
 
-    // There are three possible values here: 0 (from the initial read), 1 (from the first relaxed
-    // read), and 2 (the last read). The last case is boring and we cover the other two.
-    r2 == if initial_read { 0 } else { 1 }
+        j1.join().unwrap();
+        let r2 = j2.join().unwrap();
+
+        // There are three possible values here: 0 (the initial value), 1 (from the first
+        // relaxed store), and 2 (from the second relaxed store).
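+        // Observing 0 or 1 requires reading a stale value from the store buffer,
+        // since 2 is already the latest store by the time this thread runs.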
+        r2
+    });
 }
 
 // https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf Figure 8
-fn seq_cst() -> bool {
-    let x = static_atomic(0);
+fn seq_cst() {
+    check_all_outcomes([1, 3], || {
+        let x = static_atomic(0);
 
-    let j1 = spawn(move || {
-        x.store(1, Relaxed);
-    });
+        let j1 = spawn(move || {
+            x.store(1, Relaxed);
+        });
 
-    let j2 = spawn(move || {
-        x.store(2, SeqCst);
-        x.store(3, SeqCst);
-    });
+        let j2 = spawn(move || {
+            x.store(2, SeqCst);
+            x.store(3, SeqCst);
+        });
 
-    let j3 = spawn(move || x.load(SeqCst));
+        let j3 = spawn(move || x.load(SeqCst));
 
-    j1.join().unwrap();
-    j2.join().unwrap();
-    let r3 = j3.join().unwrap();
+        j1.join().unwrap();
+        j2.join().unwrap();
+        let r3 = j3.join().unwrap();
 
-    // Even though we force t3 to run last, it can still see the value 1.
-    // And it can *never* see the value 2!
-    assert!(r3 == 1 || r3 == 3);
-    r3 == 1
+        // Even though we force t3 to run last, it can still see the value 1.
+        // And it can *never* see the value 2!
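+        // (2 is a SeqCst store that is overwritten by the later SeqCst store of 3
+        // before the SeqCst load runs, so the load may not observe it; the relaxed
+        // store of 1 is outside the SeqCst total order and stays observable.)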
+        r3
+    });
 }
 
-fn initialization_write(add_fence: bool) -> bool {
-    let x = static_atomic(11);
+fn initialization_write(add_fence: bool) {
+    check_all_outcomes([11, 22], || {
+        let x = static_atomic(11);
 
-    let wait = static_atomic(0);
+        let wait = static_atomic(0);
 
-    let j1 = spawn(move || {
-        x.store(22, Relaxed);
-        // Relaxed is intentional. We want to test if the thread 2 reads the initialisation write
-        // after a relaxed write
-        wait.store(1, Relaxed);
-    });
+        let j1 = spawn(move || {
+            x.store(22, Relaxed);
+            // Relaxed is intentional. We want to test whether thread 2 can read the
+            // initialisation write even after observing this relaxed write.
+            wait.store(1, Relaxed);
+        });
 
-    let j2 = spawn(move || {
-        spin_until(wait, 1);
-        if add_fence {
-            fence(AcqRel);
-        }
-        x.load(Relaxed)
-    });
+        let j2 = spawn(move || {
+            spin_until(wait, 1);
+            if add_fence {
+                fence(AcqRel);
+            }
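+            // Note: thread 1 only did relaxed writes, so there is no release operation
+            // for this fence to synchronize with; reading 11 remains possible either way.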
+            x.load(Relaxed)
+        });
 
-    j1.join().unwrap();
-    let r2 = j2.join().unwrap();
+        j1.join().unwrap();
+        let r2 = j2.join().unwrap();
 
-    r2 == 11
+        r2
+    });
 }
 
-fn faa_replaced_by_load() -> bool {
-    // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
-    #[no_mangle]
-    pub fn rdmw(storing: &AtomicUsize, sync: &AtomicUsize, loading: &AtomicUsize) -> usize {
-        storing.store(1, Relaxed);
-        fence(Release);
-        // sync.fetch_add(0, Relaxed);
-        sync.load(Relaxed);
-        fence(Acquire);
-        loading.load(Relaxed)
-    }
+fn faa_replaced_by_load() {
+    check_all_outcomes([true, false], || {
+        // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
+        pub fn rdmw(storing: &AtomicUsize, sync: &AtomicUsize, loading: &AtomicUsize) -> usize {
+            storing.store(1, Relaxed);
+            fence(Release);
+            // sync.fetch_add(0, Relaxed);
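+            // As the linked issue discusses, LLVM may replace such a `fetch_add(0)` with
+            // the plain load below. With the real RMW, the (0, 0) outcome checked at the
+            // end would be forbidden; with the load, it becomes possible.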
+            sync.load(Relaxed);
+            fence(Acquire);
+            loading.load(Relaxed)
+        }
 
-    let x = static_atomic(0);
-    let y = static_atomic(0);
-    let z = static_atomic(0);
+        let x = static_atomic(0);
+        let y = static_atomic(0);
+        let z = static_atomic(0);
 
-    // Since each thread is so short, we need to make sure that they truely run at the same time
-    // Otherwise t1 will finish before t2 even starts
-    let go = static_atomic(0);
+        let t1 = spawn(move || rdmw(y, x, z));
 
-    let t1 = spawn(move || {
-        spin_until(go, 1);
-        rdmw(y, x, z)
-    });
+        let t2 = spawn(move || rdmw(z, x, y));
 
-    let t2 = spawn(move || {
-        spin_until(go, 1);
-        rdmw(z, x, y)
+        let a = t1.join().unwrap();
+        let b = t2.join().unwrap();
+        (a, b) == (0, 0)
     });
-
-    go.store(1, Relaxed);
-
-    let a = t1.join().unwrap();
-    let b = t2.join().unwrap();
-    (a, b) == (0, 0)
-}
-
-/// Asserts that the function returns true at least once in 100 runs
-#[track_caller]
-fn assert_once(f: fn() -> bool) {
-    assert!(std::iter::repeat_with(|| f()).take(100).any(|x| x));
 }
 
 pub fn main() {
-    assert_once(|| relaxed(false));
-    assert_once(|| relaxed(true));
-    assert_once(seq_cst);
-    assert_once(|| initialization_write(false));
-    assert_once(|| initialization_write(true));
-    assert_once(faa_replaced_by_load);
-
-    // Run seq_cst a few more times since it has an assertion we want to ensure holds always.
-    for _ in 0..50 {
-        seq_cst();
-    }
+    relaxed();
+    seq_cst();
+    initialization_write(false);
+    initialization_write(true);
+    faa_replaced_by_load();
 }