
Commit d780d9d

Bytes: Do not use to_region_addr()
When we switch to a (potentially) virtual memory model, we want to compact the interface, especially removing references to memory regions, because virtual memory is not just split into regions but into pages first.

The one memory-region-referencing part we are going to keep is `try_access()`, because that method is nicely structured around the fragmentation we will have to accept when it comes to paged memory. `to_region_addr()`, in contrast, does not even take a length argument, so for virtual memory, using the returned region and address is unsafe if the access crosses page boundaries.

Therefore, switch `Bytes::load()` and `store()` from `to_region_addr()` to `try_access()`.

Signed-off-by: Hanna Czenczek <[email protected]>
1 parent 592be4a commit d780d9d
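For context, here is a minimal sketch of the access pattern this commit adopts. It is not part of the change: the helper name `read_fragmented` is made up, and using `Bytes::read()` for the per-fragment copy is only an illustration, assuming vm-memory's public `guest_memory::Result` alias. The closure contract (`offset`, `len`, `region_addr`, `region`) is the one `try_access()` uses in the diff below.

    use vm_memory::guest_memory::Result;
    use vm_memory::{Bytes, GuestAddress, GuestMemory};

    // Sketch only: read `buf.len()` bytes starting at `addr`, one contiguous
    // fragment at a time. `try_access()` walks whatever regions (or, later,
    // pages) cover the range and calls the closure once per fragment, so a
    // (region, address) pair is never used across a boundary.
    fn read_fragmented<M: GuestMemory>(
        mem: &M,
        addr: GuestAddress,
        buf: &mut [u8],
    ) -> Result<usize> {
        mem.try_access(buf.len(), addr, |offset, len, region_addr, region| {
            // `offset`: progress into the overall access so far;
            // `len`: remaining bytes, capped to what fits in this region.
            region.read(&mut buf[offset..offset + len], region_addr)
        })
    }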

src/guest_memory.rs

Lines changed: 54 additions & 8 deletions
@@ -44,6 +44,7 @@
 use std::convert::From;
 use std::fs::File;
 use std::io;
+use std::mem::size_of;
 use std::ops::{BitAnd, BitOr, Deref};
 use std::rc::Rc;
 use std::sync::atomic::Ordering;
@@ -718,17 +719,62 @@ impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
     }
 
     fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
-        // `find_region` should really do what `to_region_addr` is doing right now, except
-        // it should keep returning a `Result`.
-        self.to_region_addr(addr)
-            .ok_or(Error::InvalidGuestAddress(addr))
-            .and_then(|(region, region_addr)| region.store(val, region_addr, order))
+        let expected = size_of::<O>();
+
+        let completed = self.try_access(
+            expected,
+            addr,
+            |offset, len, region_addr, region| -> Result<usize> {
+                assert_eq!(offset, 0);
+                if len < expected {
+                    return Err(Error::PartialBuffer {
+                        expected,
+                        completed: len,
+                    });
+                }
+                region.store(val, region_addr, order).map(|()| expected)
+            },
+        )?;
+
+        if completed < expected {
+            Err(Error::PartialBuffer {
+                expected,
+                completed,
+            })
+        } else {
+            Ok(())
+        }
     }
 
     fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
-        self.to_region_addr(addr)
-            .ok_or(Error::InvalidGuestAddress(addr))
-            .and_then(|(region, region_addr)| region.load(region_addr, order))
+        let expected = size_of::<O>();
+        let mut result = None::<O>;
+
+        let completed = self.try_access(
+            expected,
+            addr,
+            |offset, len, region_addr, region| -> Result<usize> {
+                assert_eq!(offset, 0);
+                if len < expected {
+                    return Err(Error::PartialBuffer {
+                        expected,
+                        completed: len,
+                    });
+                }
+                result = Some(region.load(region_addr, order)?);
+                Ok(expected)
+            },
+        )?;
+
+        if completed < expected {
+            Err(Error::PartialBuffer {
+                expected,
+                completed,
+            })
+        } else {
+            // Must be set because `completed == expected`
+            Ok(result.unwrap())
+        }
     }
 }
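Callers are unaffected by this change: `load()` and `store()` keep their signatures and error type; only the lookup now routes through `try_access()`. A hypothetical call site, assuming the crate's mmap backend (`GuestMemoryMmap`) and an arbitrary guest address:

    use std::sync::atomic::Ordering;
    use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

    fn roundtrip(mem: &GuestMemoryMmap) -> vm_memory::guest_memory::Result<()> {
        // Store a u32 at an arbitrary guest-physical address, then load it back.
        // Both calls now go through `try_access()` internally.
        mem.store(0x1234_5678u32, GuestAddress(0x1000), Ordering::SeqCst)?;
        let val: u32 = mem.load(GuestAddress(0x1000), Ordering::SeqCst)?;
        assert_eq!(val, 0x1234_5678);
        Ok(())
    }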
