@@ -11,11 +11,14 @@
 //! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random
 //! data.

-use crate::VolatileSlice;
 use std::io::{Read, Write};
 use std::mem::size_of;
 use std::result::Result;
 use std::slice::{from_raw_parts, from_raw_parts_mut};
+use std::sync::atomic::Ordering;
+
+use crate::atomic_integer::AtomicInteger;
+use crate::VolatileSlice;

 /// Types for which it is safe to initialize from raw data.
 ///
@@ -153,6 +156,41 @@ byte_valued_type!(i32);
 byte_valued_type!(i64);
 byte_valued_type!(isize);

+/// A trait used to identify types which can be accessed atomically by proxy.
+pub trait AtomicAccess:
+    ByteValued
+    // Could not find a more succinct way of stating that `Self` can be converted
+    // into `Self::A::V`, and the other way around.
+    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
+    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
+{
+    /// The `AtomicInteger` that atomic operations on `Self` are based on.
+    type A: AtomicInteger;
+}
+
+macro_rules! impl_atomic_access {
+    ($T:ty, $A:path) => {
+        impl AtomicAccess for $T {
+            type A = $A;
+        }
+    };
+}
+
+impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
+impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
+impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
+impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
+
+impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
+impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
+impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
+impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
+
+impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
+impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);
+
 /// A container to host a range of bytes and access its content.
 ///
 /// Candidates which may implement this trait include:
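For a concrete type, the macro simply pins down the proxy: `impl_atomic_access!(u32, std::sync::atomic::AtomicU32)` expands to `impl AtomicAccess for u32 { type A = AtomicU32; }` (the 64-bit impls are gated to targets where the corresponding atomics exist). The `From`/`Into` supertrait bounds then give generic code a lossless round-trip through the proxy's value type. A minimal sketch relying only on the bounds shown above (the function name is illustrative, not part of this change):

```rust
// Illustrative only: generic code can shuttle a `T: AtomicAccess` value into
// the raw representation used by its atomic proxy `T::A`, and back, relying
// solely on the supertrait bounds.
fn round_trip<T: AtomicAccess>(val: T) -> T {
    // `Into` converts `T` into the proxy's value type `T::A::V`...
    let raw: <T::A as AtomicInteger>::V = val.into();
    // ...and `From` recovers the original value from it.
    T::from(raw)
}
```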
@@ -269,16 +307,40 @@ pub trait Bytes<A> {
     fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
     where
         F: Write;
+
+    /// Atomically store a value at the specified address.
+    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;
+
+    /// Atomically load a value from the specified address.
+    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
 }

 #[cfg(test)]
-mod tests {
-    use crate::{ByteValued, Bytes};
+pub(crate) mod tests {
+    use super::*;
+
     use std::fmt::Debug;
-    use std::io::{Read, Write};
-    use std::mem::{align_of, size_of};
+    use std::mem::align_of;
     use std::slice;

+    // Helper function to test atomic accesses for a given `b: Bytes` that is expected to be
+    // zero-initialized.
+    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
+    where
+        A: Copy,
+        B: Bytes<A>,
+        B::E: Debug,
+    {
+        let val = 100u32;
+
+        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
+        b.store(val, addr, Ordering::Relaxed).unwrap();
+        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);
+
+        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
+        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
+    }
+
     fn check_byte_valued_type<T>()
     where
         T: ByteValued + PartialEq + Debug + Default,
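A note on semantics: `store` and `load` are each individually atomic, but a read-modify-write composed from them is not. A hypothetical caller-side sketch (`increment_counter` and its ordering choices are illustrative, not part of this change):

```rust
use std::sync::atomic::Ordering;

// Hypothetical usage: bump a u32 counter living in guest memory through any
// `Bytes` implementation. The load and the store are each atomic, but the
// increment as a whole is not; another writer could slip in between them.
fn increment_counter<A: Copy, B: Bytes<A>>(mem: &B, addr: A) -> Result<u32, B::E> {
    let current: u32 = mem.load(addr, Ordering::Acquire)?;
    let next = current.wrapping_add(1);
    mem.store(next, addr, Ordering::Release)?;
    Ok(next)
}
```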
@@ -409,6 +471,19 @@ mod tests {
         {
             unimplemented!()
         }
+
+        fn store<T: AtomicAccess>(
+            &self,
+            _val: T,
+            _addr: usize,
+            _order: Ordering,
+        ) -> Result<(), Self::E> {
+            unimplemented!()
+        }
+
+        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
+            unimplemented!()
+        }
     }

     #[test]
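The mock above only satisfies the compiler; for a real implementor the likely shape is to validate `addr`, obtain an aligned pointer into the backing memory, and access it through the proxy type. A rough sketch under stated assumptions — that `AtomicInteger` exposes `load(&self, Ordering) -> Self::V` and `store(&self, Self::V, Ordering)` methods (implied by its name and use here, but not shown in this diff):

```rust
use std::sync::atomic::Ordering;

// Hypothetical helpers, not part of this change. Safety: `ptr` must be
// non-null, aligned for `T::A`, and point to memory that may be accessed
// concurrently through an atomic of that width.
unsafe fn load_through_proxy<T: AtomicAccess>(ptr: *const T, order: Ordering) -> T {
    // Reinterpret the location as the atomic proxy and load through it,
    // converting the raw value back into `T` via the `From` bound.
    let atomic = &*(ptr as *const T::A);
    T::from(atomic.load(order))
}

unsafe fn store_through_proxy<T: AtomicAccess>(ptr: *mut T, val: T, order: Ordering) {
    // Same reinterpretation for stores; `Into` supplies the raw value.
    let atomic = &*(ptr as *const T::A);
    atomic.store(val.into(), order);
}
```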