Skip to content

Commit 42fd1be

Browse files
alexandruaga authored and alxiord committed
add atomic load and store methods to Bytes
Signed-off-by: Alexandru Agache <[email protected]>
1 parent b4480de commit 42fd1be

File tree

6 files changed

+163
-10
lines changed

6 files changed

+163
-10
lines changed

coverage_config_x86_64.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
{
2-
"coverage_score": 84.8,
2+
"coverage_score": 85.4,
33
"exclude_path": "mmap_windows.rs",
44
"crate_features": "backend-mmap,backend-atomic"
55
}

src/bytes.rs

Lines changed: 80 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,14 @@
1111
//! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random
1212
//! data.
1313
14-
use crate::VolatileSlice;
1514
use std::io::{Read, Write};
1615
use std::mem::size_of;
1716
use std::result::Result;
1817
use std::slice::{from_raw_parts, from_raw_parts_mut};
18+
use std::sync::atomic::Ordering;
19+
20+
use crate::atomic_integer::AtomicInteger;
21+
use crate::VolatileSlice;
1922

2023
/// Types for which it is safe to initialize from raw data.
2124
///
@@ -153,6 +156,41 @@ byte_valued_type!(i32);
153156
byte_valued_type!(i64);
154157
byte_valued_type!(isize);
155158

159+
/// A trait used to identify types which can be accessed atomically by proxy.
160+
pub trait AtomicAccess:
161+
ByteValued
162+
// Could not find a more succinct way of stating that `Self` can be converted
163+
// into `Self::A::V`, and the other way around.
164+
+ From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
165+
+ Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
166+
{
167+
/// The `AtomicInteger` that atomic operations on `Self` are based on.
168+
type A: AtomicInteger;
169+
}
170+
171+
macro_rules! impl_atomic_access {
172+
($T:ty, $A:path) => {
173+
impl AtomicAccess for $T {
174+
type A = $A;
175+
}
176+
};
177+
}
178+
179+
impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
180+
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
181+
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
182+
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
183+
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
184+
185+
impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
186+
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
187+
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
188+
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
189+
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
190+
191+
impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
192+
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);
193+
156194
/// A container to host a range of bytes and access its content.
157195
///
158196
/// Candidates which may implement this trait include:
@@ -269,16 +307,40 @@ pub trait Bytes<A> {
269307
fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
270308
where
271309
F: Write;
310+
311+
/// Atomically store a value at the specified address.
312+
fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;
313+
314+
/// Atomically load a value from the specified address.
315+
fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
272316
}
273317

274318
#[cfg(test)]
275-
mod tests {
276-
use crate::{ByteValued, Bytes};
319+
pub(crate) mod tests {
320+
use super::*;
321+
277322
use std::fmt::Debug;
278-
use std::io::{Read, Write};
279-
use std::mem::{align_of, size_of};
323+
use std::mem::align_of;
280324
use std::slice;
281325

326+
// Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
327+
// zero-initialized.
328+
pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
329+
where
330+
A: Copy,
331+
B: Bytes<A>,
332+
B::E: Debug,
333+
{
334+
let val = 100u32;
335+
336+
assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
337+
b.store(val, addr, Ordering::Relaxed).unwrap();
338+
assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);
339+
340+
assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
341+
assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
342+
}
343+
282344
fn check_byte_valued_type<T>()
283345
where
284346
T: ByteValued + PartialEq + Debug + Default,
@@ -409,6 +471,19 @@ mod tests {
409471
{
410472
unimplemented!()
411473
}
474+
475+
fn store<T: AtomicAccess>(
476+
&self,
477+
_val: T,
478+
_addr: usize,
479+
_order: Ordering,
480+
) -> Result<(), Self::E> {
481+
unimplemented!()
482+
}
483+
484+
fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
485+
unimplemented!()
486+
}
412487
}
413488

414489
#[test]

src/guest_memory.rs

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,11 @@ use std::fs::File;
3939
use std::io::{self, Read, Write};
4040
use std::ops::{BitAnd, BitOr, Deref};
4141
use std::rc::Rc;
42+
use std::sync::atomic::Ordering;
4243
use std::sync::Arc;
4344

4445
use crate::address::{Address, AddressValue};
45-
use crate::bytes::Bytes;
46+
use crate::bytes::{AtomicAccess, Bytes};
4647
use crate::volatile_memory;
4748

4849
static MAX_ACCESS_CHUNK: usize = 4096;
@@ -868,6 +869,20 @@ impl<T: GuestMemory> Bytes<GuestAddress> for T {
868869
}
869870
Ok(())
870871
}
872+
873+
fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
874+
// `find_region` should really do what `to_region_addr` is doing right now, except
875+
// it should keep returning a `Result`.
876+
self.to_region_addr(addr)
877+
.ok_or(Error::InvalidGuestAddress(addr))
878+
.and_then(|(region, region_addr)| region.store(val, region_addr, order))
879+
}
880+
881+
fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
882+
self.to_region_addr(addr)
883+
.ok_or(Error::InvalidGuestAddress(addr))
884+
.and_then(|(region, region_addr)| region.load(region_addr, order))
885+
}
871886
}
872887

873888
#[cfg(test)]
@@ -1081,4 +1096,14 @@ mod tests {
10811096
.write_all_to(addr, &mut Cursor::new(&mut image), 0)
10821097
.is_ok());
10831098
}
1099+
1100+
#[cfg(feature = "backend-mmap")]
1101+
#[test]
1102+
fn test_atomic_accesses() {
1103+
let addr = GuestAddress(0x1000);
1104+
let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1105+
let bad_addr = addr.unchecked_add(0x1000);
1106+
1107+
crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
1108+
}
10841109
}

src/lib.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ mod atomic_integer;
3232
pub use atomic_integer::AtomicInteger;
3333

3434
pub mod bytes;
35-
pub use bytes::{ByteValued, Bytes};
35+
pub use bytes::{AtomicAccess, ByteValued, Bytes};
3636

3737
pub mod endian;
3838
pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};

src/mmap.rs

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,15 @@ use std::fmt;
1818
use std::io::{Read, Write};
1919
use std::ops::Deref;
2020
use std::result;
21+
use std::sync::atomic::Ordering;
2122
use std::sync::Arc;
2223

2324
use crate::address::Address;
2425
use crate::guest_memory::{
2526
self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress,
2627
};
2728
use crate::volatile_memory::{VolatileMemory, VolatileSlice};
28-
use crate::Bytes;
29+
use crate::{AtomicAccess, Bytes};
2930

3031
#[cfg(unix)]
3132
pub use crate::mmap_unix::{Error as MmapRegionError, MmapRegion};
@@ -342,6 +343,27 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap {
342343
.write_all_to::<F>(maddr, dst, count)
343344
.map_err(Into::into)
344345
}
346+
347+
fn store<T: AtomicAccess>(
348+
&self,
349+
val: T,
350+
addr: MemoryRegionAddress,
351+
order: Ordering,
352+
) -> guest_memory::Result<()> {
353+
self.as_volatile_slice().and_then(|s| {
354+
s.store(val, addr.raw_value() as usize, order)
355+
.map_err(Into::into)
356+
})
357+
}
358+
359+
fn load<T: AtomicAccess>(
360+
&self,
361+
addr: MemoryRegionAddress,
362+
order: Ordering,
363+
) -> guest_memory::Result<T> {
364+
self.as_volatile_slice()
365+
.and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
366+
}
345367
}
346368

347369
impl GuestMemoryRegion for GuestRegionMmap {
@@ -1444,4 +1466,16 @@ mod tests {
14441466
assert_eq!(guest_mem.check_range(start_addr2, 0xc00), false);
14451467
assert_eq!(guest_mem.check_range(start_addr1, std::usize::MAX), false);
14461468
}
1469+
1470+
#[test]
1471+
fn test_atomic_accesses() {
1472+
let region =
1473+
GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0)).unwrap();
1474+
1475+
crate::bytes::tests::check_atomic_accesses(
1476+
region,
1477+
MemoryRegionAddress(0),
1478+
MemoryRegionAddress(0x1000),
1479+
);
1480+
}
14471481
}

src/volatile_memory.rs

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,10 +34,11 @@ use std::ptr::copy;
3434
use std::ptr::{read_volatile, write_volatile};
3535
use std::result;
3636
use std::slice::{from_raw_parts, from_raw_parts_mut};
37+
use std::sync::atomic::Ordering;
3738
use std::usize;
3839

3940
use crate::atomic_integer::AtomicInteger;
40-
use crate::{ByteValued, Bytes};
41+
use crate::{AtomicAccess, ByteValued, Bytes};
4142

4243
use copy_slice_impl::copy_slice;
4344

@@ -695,6 +696,16 @@ impl Bytes<usize> for VolatileSlice<'_> {
695696
}
696697
Ok(())
697698
}
699+
700+
fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
701+
self.get_atomic_ref::<T::A>(addr)
702+
.map(|r| r.store(val.into(), order))
703+
}
704+
705+
fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
706+
self.get_atomic_ref::<T::A>(addr)
707+
.map(|r| r.load(order).into())
708+
}
698709
}
699710

700711
impl VolatileMemory for VolatileSlice<'_> {
@@ -1672,4 +1683,12 @@ mod tests {
16721683
assert_eq!(super::alignment(a + 12), 4);
16731684
assert_eq!(super::alignment(a + 8), 8);
16741685
}
1686+
1687+
#[test]
1688+
fn test_atomic_accesses() {
1689+
let a = VecMem::new(0x1000);
1690+
let s = a.as_volatile_slice();
1691+
1692+
crate::bytes::tests::check_atomic_accesses(s, 0, 0x1000);
1693+
}
16751694
}

0 commit comments

Comments (0)