Commit 2b83c72
Bytes: Do not use to_region_addr()

When we switch to a (potentially) virtual memory model, we want to compact the interface, especially removing references to memory regions, because virtual memory is not just split into regions, but into pages first.

The one memory-region-referencing part we are going to keep is `try_access()`, because that method is nicely structured around the fragmentation we will have to accept when it comes to paged memory. `to_region_addr()`, in contrast, does not even take a length argument, so for virtual memory, using the returned region and address is unsafe if the access crosses page boundaries.

Therefore, switch `Bytes::load()` and `store()` from using `to_region_addr()` to `try_access()`.

Signed-off-by: Hanna Czenczek <[email protected]>
1 parent 9bcd5ac commit 2b83c72
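
For readers unfamiliar with the interface being kept, here is a minimal sketch (not part of this commit) of the `try_access()` callback contract as it exists in vm-memory at this point. `copy_from_guest` is a hypothetical helper name used only for illustration. The callback is invoked once per contiguous fragment, which is exactly the structure paged memory will impose:

```rust
use vm_memory::{Bytes, GuestAddress, GuestMemory, GuestMemoryError};

// Hypothetical helper (not part of vm-memory) illustrating the callback
// contract of `GuestMemory::try_access()`: `f(offset, len, region_addr,
// region)` is called once per contiguous fragment, where `offset` counts
// bytes already completed and `len` is how much of the request fits into
// the current region.
fn copy_from_guest<M: GuestMemory>(
    mem: &M,
    addr: GuestAddress,
    buf: &mut [u8],
) -> Result<usize, GuestMemoryError> {
    let total = buf.len();
    mem.try_access(total, addr, |offset, len, region_addr, region| {
        // Each fragment is contiguous within a single region, so a plain
        // slice read is valid here even when the full range crosses
        // region (or, later, page) boundaries.
        region.read_slice(&mut buf[offset..offset + len], region_addr)?;
        Ok(len)
    })
}
```

`to_region_addr()`, by contrast, returns a single `(region, region_addr)` pair with no length attached, so a caller has no way to know how far past `region_addr` it may safely reach.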


src/guest_memory.rs

Lines changed: 54 additions & 8 deletions
```diff
@@ -44,6 +44,7 @@
 use std::convert::From;
 use std::fs::File;
 use std::io;
+use std::mem::size_of;
 use std::ops::{BitAnd, BitOr, Deref};
 use std::rc::Rc;
 use std::sync::atomic::Ordering;
@@ -591,17 +592,62 @@ impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
     }
 
     fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
-        // `find_region` should really do what `to_region_addr` is doing right now, except
-        // it should keep returning a `Result`.
-        self.to_region_addr(addr)
-            .ok_or(Error::InvalidGuestAddress(addr))
-            .and_then(|(region, region_addr)| region.store(val, region_addr, order))
+        let expected = size_of::<O>();
+
+        let completed = self.try_access(
+            expected,
+            addr,
+            |offset, len, region_addr, region| -> Result<usize> {
+                assert_eq!(offset, 0);
+                if len < expected {
+                    return Err(Error::PartialBuffer {
+                        expected,
+                        completed: len,
+                    });
+                }
+                region.store(val, region_addr, order).map(|()| expected)
+            },
+        )?;
+
+        if completed < expected {
+            Err(Error::PartialBuffer {
+                expected,
+                completed,
+            })
+        } else {
+            Ok(())
+        }
     }
 
     fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
-        self.to_region_addr(addr)
-            .ok_or(Error::InvalidGuestAddress(addr))
-            .and_then(|(region, region_addr)| region.load(region_addr, order))
+        let expected = size_of::<O>();
+        let mut result = None::<O>;
+
+        let completed = self.try_access(
+            expected,
+            addr,
+            |offset, len, region_addr, region| -> Result<usize> {
+                assert_eq!(offset, 0);
+                if len < expected {
+                    return Err(Error::PartialBuffer {
+                        expected,
+                        completed: len,
+                    });
+                }
+                result = Some(region.load(region_addr, order)?);
+                Ok(expected)
+            },
+        )?;
+
+        if completed < expected {
+            Err(Error::PartialBuffer {
+                expected,
+                completed,
+            })
+        } else {
+            // Must be set because `completed == expected`
+            Ok(result.unwrap())
+        }
     }
 }
```

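To illustrate the caller-visible effect, a hedged sketch (not part of the commit, and assuming a recent vm-memory built with the `backend-mmap` feature): an atomic access contained in a single region keeps working, while one straddling a region boundary now surfaces `Error::PartialBuffer` from the `try_access()` callback instead of being performed through an unchecked `to_region_addr()` mapping.

```rust
use std::sync::atomic::Ordering;

use vm_memory::{Bytes, GuestAddress, GuestMemoryError, GuestMemoryMmap};

fn main() {
    // Two adjacent 4 KiB regions, so guest address 0x1000 is a region boundary.
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0), 0x1000),
        (GuestAddress(0x1000), 0x1000),
    ])
    .unwrap();

    // Fully contained in the first region: store() and load() succeed.
    mem.store(0xdead_beef_u32, GuestAddress(0x10), Ordering::Relaxed)
        .unwrap();
    let val: u32 = mem.load(GuestAddress(0x10), Ordering::Relaxed).unwrap();
    assert_eq!(val, 0xdead_beef);

    // Straddling the boundary at 0x1000: the try_access() callback sees only
    // two bytes in the first fragment, fewer than size_of::<u32>(), and
    // reports PartialBuffer rather than accessing across the boundary.
    match mem.load::<u32>(GuestAddress(0xffe), Ordering::Relaxed) {
        Err(GuestMemoryError::PartialBuffer { expected, completed }) => {
            assert_eq!((expected, completed), (4, 2));
        }
        other => panic!("expected PartialBuffer, got {:?}", other),
    }
}
```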