|
1 | 1 | use crate::mem::SizedTypeProperties; |
2 | | -use crate::intrinsics; |
3 | | -use crate::macros::cfg; |
4 | | -use crate::sync::atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64}; |
| 2 | +#[cfg(not(bootstrap))] |
| 3 | +use crate::sync::atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64}; |
| 4 | +use crate::{cfg, intrinsics}; |
5 | 5 |
|
6 | 6 | /// Performs a volatile read of the value from `src` without moving it. This |
7 | 7 | /// leaves the memory in `src` unchanged. |
@@ -80,29 +80,34 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T { |
80 | 80 | is_zst: bool = T::IS_ZST, |
81 | 81 | ) => crate::ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst) |
82 | 82 | ); |
83 | | - match size_of::<T>() { |
84 | | - 1 => if cfg!(target_has_atomic_load_store = "8") && align_of::<T>() == align_of::<AtomicU8>() { |
85 | | - intrinsics::atomic_load_relaxed(src) |
86 | | - } else { |
87 | | - intrinsics::volatile_load(src) |
88 | | - } |
89 | | - 2 => if cfg!(target_has_atomic_load_store = "16") && align_of::<T>() == align_of::<AtomicU16>() { |
90 | | - intrinsics::atomic_load_relaxed(src) |
91 | | - } else { |
92 | | - intrinsics::volatile_load(src) |
93 | | - } |
94 | | - 4 => if cfg!(target_has_atomic_load_store = "32") && align_of::<T>() == align_of::<AtomicU32>() { |
95 | | - intrinsics::atomic_load_relaxed(src) |
96 | | - } else { |
97 | | - intrinsics::volatile_load(src) |
98 | | - } |
99 | | - 8 => if cfg!(target_has_atomic_load_store = "64") && align_of::<T>() == align_of::<AtomicU64>() { |
100 | | - intrinsics::atomic_load_relaxed(src) |
101 | | - } else { |
102 | | - intrinsics::volatile_load(src) |
103 | | - } |
104 | | - _ => intrinsics::volatile_load(src) |
| 83 | + |
| 84 | + // TODO: Guard patterns |
| 85 | + #[cfg(not(bootstrap))] |
| 86 | + match size_of::<T>() { |
| 87 | + 1 if cfg!(target_has_atomic_load_store = "8") |
| 88 | + && align_of::<T>() == align_of::<AtomicU8>() => |
| 89 | + { |
| 90 | + intrinsics::volatile_load_atomic_relaxed(src) |
| 91 | + } |
| 92 | + 2 if cfg!(target_has_atomic_load_store = "16") |
| 93 | + && align_of::<T>() == align_of::<AtomicU16>() => |
| 94 | + { |
| 95 | + intrinsics::volatile_load_atomic_relaxed(src) |
| 96 | + } |
| 97 | + 4 if cfg!(target_has_atomic_load_store = "32") |
| 98 | + && align_of::<T>() == align_of::<AtomicU32>() => |
| 99 | + { |
| 100 | + intrinsics::volatile_load_atomic_relaxed(src) |
| 101 | + } |
| 102 | + 8 if cfg!(target_has_atomic_load_store = "64") |
| 103 | + && align_of::<T>() == align_of::<AtomicU64>() => |
| 104 | + { |
| 105 | + intrinsics::volatile_load_atomic_relaxed(src) |
| 106 | + } |
| 107 | + _ => intrinsics::volatile_load(src), |
105 | 108 | } |
| 109 | + #[cfg(bootstrap)] |
| 110 | + intrinsics::volatile_load(src) |
106 | 111 | } |
107 | 112 | } |
108 | 113 |
|
@@ -182,28 +187,33 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) { |
182 | 187 | is_zst: bool = T::IS_ZST, |
183 | 188 | ) => crate::ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst) |
184 | 189 | ); |
185 | | - match size_of::<T>() { |
186 | | - 1 => if cfg!(target_has_atomic_load_store = "8") && align_of::<T>() == align_of::<AtomicU8>() { |
187 | | - intrinsics::atomic_store_relaxed(dst, src) |
188 | | - } else { |
189 | | - intrinsics::volatile_store(dst, src) |
190 | | - } |
191 | | - 2 => if cfg!(target_has_atomic_load_store = "16") && align_of::<T>() == align_of::<AtomicU16>() { |
192 | | - intrinsics::atomic_store_relaxed(dst, src) |
193 | | - } else { |
194 | | - intrinsics::volatile_store(dst, src) |
195 | | - } |
196 | | - 4 => if cfg!(target_has_atomic_load_store = "32") && align_of::<T>() == align_of::<AtomicU32>() { |
197 | | - intrinsics::atomic_store_relaxed(dst, src) |
198 | | - } else { |
199 | | - intrinsics::volatile_store(dst, src) |
200 | | - } |
201 | | - 8 => if cfg!(target_has_atomic_load_store = "64") && align_of::<T>() == align_of::<AtomicU64>() { |
202 | | - intrinsics::atomic_store_relaxed(dst, src) |
203 | | - } else { |
204 | | - intrinsics::volatile_store(dst, src) |
205 | | - } |
206 | | - _ => intrinsics::volatile_store(dst, src) |
| 190 | + |
| 191 | + // TODO: Guard patterns |
| 192 | + #[cfg(not(bootstrap))] |
| 193 | + match size_of::<T>() { |
| 194 | + 1 if cfg!(target_has_atomic_load_store = "8") |
| 195 | + && align_of::<T>() == align_of::<AtomicU8>() => |
| 196 | + { |
| 197 | + intrinsics::volatile_store_atomic_relaxed(dst, src) |
| 198 | + } |
| 199 | + 2 if cfg!(target_has_atomic_load_store = "16") |
| 200 | + && align_of::<T>() == align_of::<AtomicU16>() => |
| 201 | + { |
| 202 | + intrinsics::volatile_store_atomic_relaxed(dst, src) |
| 203 | + } |
| 204 | + 4 if cfg!(target_has_atomic_load_store = "32") |
| 205 | + && align_of::<T>() == align_of::<AtomicU32>() => |
| 206 | + { |
| 207 | + intrinsics::volatile_store_atomic_relaxed(dst, src) |
| 208 | + } |
| 209 | + 8 if cfg!(target_has_atomic_load_store = "64") |
| 210 | + && align_of::<T>() == align_of::<AtomicU64>() => |
| 211 | + { |
| 212 | + intrinsics::volatile_store_atomic_relaxed(dst, src) |
| 213 | + } |
| 214 | + _ => intrinsics::volatile_store(dst, src), |
207 | 215 | } |
| 216 | + #[cfg(bootstrap)] |
| 217 | + intrinsics::volatile_store(dst, src) |
208 | 218 | } |
209 | 219 | } |
0 commit comments