Commit 3adf67b

vireshk authored and jiangliu committed
volatile_memory: Add on-demand mapping support for Xen
For the Xen grant memory mapping model, the memory can't always be mapped in advance and may need to be mapped on the fly. This commit adds support for that by introducing a new field to the various volatile memory structures: `mmap: Option<&'a MmapInfo>`. The type `MmapInfo` is set to `PhantomData<()>` for all existing users, and to `MmapXen` for Xen. All existing users set the new field to `None`, so their behavior remains unchanged.

With Xen grant memory mappings, the address returned by as_ptr() is simply an offset into the region, as the real mapping happens at a later point. In order not to break existing users of the API, the existing API is kept around for non-Xen implementations. New accessors, ptr_guard() and ptr_guard_mut(), are added for both Xen and non-Xen platforms; they map the memory, hand out a pointer to work on, and unmap the memory automatically when the guard is dropped.

Signed-off-by: Viresh Kumar <[email protected]>
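A minimal usage sketch of the new guard API (hedged: ptr_guard(), ptr_guard_mut(), and the guard's as_ptr() come from this commit's message and the updated slice_addr() test below; the surrounding setup is illustrative):

// `s` is a VolatileSlice obtained via get_slice() on a region, as in the
// updated slice_addr() test in src/mmap.rs below.
let guard = s.ptr_guard();
let ptr = guard.as_ptr(); // pointer stays valid only while `guard` is alive
// ... read through `ptr` ...
drop(guard); // for on-demand Xen grant mappings, the memory is unmapped here

// Mutable access goes through the _mut variant:
let guard_mut = s.ptr_guard_mut();
// ... write through guard_mut.as_ptr() ...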
1 parent 53af3f0 commit 3adf67b

5 files changed: 312 additions & 49 deletions


src/mmap.rs

Lines changed: 2 additions & 1 deletion
@@ -908,7 +908,8 @@ mod tests {
     fn slice_addr() {
         let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap();
         let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
-        assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
+        let guard = s.ptr_guard();
+        assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) });
     }

     #[test]

src/mmap_unix.rs

Lines changed: 1 addition & 0 deletions
@@ -413,6 +413,7 @@ impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
                     self.addr.add(offset),
                     count,
                     self.bitmap.slice_at(offset),
+                    None,
                 )
             },
         )

src/mmap_windows.rs

Lines changed: 6 additions & 1 deletion
@@ -210,7 +210,12 @@ impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
         // Safe because we checked that offset + count was within our range and we only ever hand
         // out volatile accessors.
         Ok(unsafe {
-            VolatileSlice::with_bitmap(self.addr.add(offset), count, self.bitmap.slice_at(offset))
+            VolatileSlice::with_bitmap(
+                self.addr.add(offset),
+                count,
+                self.bitmap.slice_at(offset),
+                None,
+            )
         })
     }
 }

src/mmap_xen.rs

Lines changed: 101 additions & 3 deletions
@@ -296,6 +296,12 @@ impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
     ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
         let _ = self.compute_end_offset(offset, count)?;

+        let mmap_info = if self.mmap.mmap_in_advance() {
+            None
+        } else {
+            Some(&self.mmap)
+        };
+
         Ok(
             // SAFETY: Safe because we checked that offset + count was within our range and we only
             // ever hand out volatile accessors.
@@ -304,6 +310,7 @@ impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
                 self.as_ptr().add(offset),
                 count,
                 self.bitmap.slice_at(offset),
+                mmap_info,
             )
         },
     )
@@ -432,6 +439,7 @@ fn validate_file(file_offset: &Option<FileOffset>) -> Result<(i32, u64)> {

 // Xen Foreign memory mapping interface.
 trait MmapXenTrait: std::fmt::Debug {
+    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice>;
     fn addr(&self) -> *mut u8;
 }

@@ -459,6 +467,11 @@ impl MmapXenUnix {
 }

 impl MmapXenTrait for MmapXenUnix {
+    #[allow(unused_variables)]
+    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+        Err(Error::MappedInAdvance)
+    }
+
     fn addr(&self) -> *mut u8 {
         self.0.addr()
     }
@@ -564,6 +577,11 @@ impl MmapXenForeign {
 }

 impl MmapXenTrait for MmapXenForeign {
+    #[allow(unused_variables)]
+    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+        Err(Error::MappedInAdvance)
+    }
+
     fn addr(&self) -> *mut u8 {
         self.unix_mmap.addr()
     }
@@ -790,6 +808,11 @@ impl MmapXenGrant {
 }

 impl MmapXenTrait for MmapXenGrant {
+    // Maps a slice out of the entire region.
+    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
+        MmapXenSlice::new_with(self.clone(), addr as usize, prot, len)
+    }
+
     fn addr(&self) -> *mut u8 {
         if let Some(ref unix_mmap) = self.unix_mmap {
             unix_mmap.addr()
@@ -807,6 +830,65 @@ impl Drop for MmapXenGrant {
     }
 }

+#[derive(Debug)]
+pub(crate) struct MmapXenSlice {
+    grant: Option<MmapXenGrant>,
+    unix_mmap: Option<MmapUnix>,
+    addr: *mut u8,
+    size: usize,
+    index: u64,
+}
+
+impl MmapXenSlice {
+    fn raw(addr: *mut u8) -> Self {
+        Self {
+            grant: None,
+            unix_mmap: None,
+            addr,
+            size: 0,
+            index: 0,
+        }
+    }
+
+    fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result<Self> {
+        let page_size = page_size() as usize;
+        let page_base: usize = (offset / page_size) * page_size;
+        let offset = offset - page_base;
+        let size = offset + size;
+
+        let addr = grant.guest_base.0 + page_base as u64;
+        let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?;
+
+        // SAFETY: We have already mapped the range including offset.
+        let addr = unsafe { unix_mmap.addr().add(offset) };
+
+        Ok(Self {
+            grant: Some(grant),
+            unix_mmap: Some(unix_mmap),
+            addr,
+            size,
+            index,
+        })
+    }
+
+    // Mapped address for the region.
+    pub(crate) fn addr(&self) -> *mut u8 {
+        self.addr
+    }
+}
+
+impl Drop for MmapXenSlice {
+    fn drop(&mut self) {
+        // Unmaps memory automatically once this instance goes out of scope.
+        if let Some(unix_mmap) = self.unix_mmap.take() {
+            self.grant
+                .as_ref()
+                .unwrap()
+                .unmap_range(unix_mmap, self.size, self.index);
+        }
+    }
+}
+
 #[derive(Debug)]
 pub struct MmapXen {
     xen_flags: MmapXenFlags,
@@ -849,6 +931,22 @@ impl MmapXen {
     fn data(&self) -> u32 {
         self.domid
     }
+
+    fn mmap_in_advance(&self) -> bool {
+        self.xen_flags.mmap_in_advance()
+    }
+
+    pub(crate) fn mmap(
+        mmap_xen: Option<&Self>,
+        addr: *mut u8,
+        prot: i32,
+        len: usize,
+    ) -> MmapXenSlice {
+        match mmap_xen {
+            Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(),
+            None => MmapXenSlice::raw(addr),
+        }
+    }
 }

 #[cfg(test)]
@@ -936,21 +1034,21 @@ mod tests {
         assert_eq!(r.flags(), range.mmap_flags);
         assert_eq!(r.data(), range.mmap_data);
         assert_ne!(r.addr(), null_mut());
-        assert!(r.xen_flags.mmap_in_advance());
+        assert!(r.mmap_in_advance());

         range.mmap_flags = MmapXenFlags::GRANT.bits();
         let r = MmapXen::new(&range).unwrap();
         assert_eq!(r.flags(), range.mmap_flags);
         assert_eq!(r.data(), range.mmap_data);
         assert_ne!(r.addr(), null_mut());
-        assert!(r.xen_flags.mmap_in_advance());
+        assert!(r.mmap_in_advance());

         range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
         let r = MmapXen::new(&range).unwrap();
         assert_eq!(r.flags(), range.mmap_flags);
         assert_eq!(r.data(), range.mmap_data);
         assert_eq!(r.addr(), null_mut());
-        assert!(!r.xen_flags.mmap_in_advance());
+        assert!(!r.mmap_in_advance());
     }

     #[test]
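The fifth changed file (presumably src/volatile_memory.rs, given the commit title) is not shown on this page; it is where the new guards are wired up. As a rough, crate-internal sketch of the design, using only the MmapXen::mmap() and MmapXenSlice::addr() signatures added above (the PtrGuard name and layout are illustrative, not the crate's actual definitions):

// Illustrative sketch only; the real guard types live in the file not shown here.
pub struct PtrGuard {
    // Holding the slice keeps the on-demand mapping alive; dropping the
    // guard drops the slice, whose Drop impl unmaps the grant range.
    slice: MmapXenSlice,
}

impl PtrGuard {
    fn new(mmap: Option<&MmapXen>, addr: *mut u8, prot: i32, len: usize) -> Self {
        // For non-Xen and mapped-in-advance regions, MmapXen::mmap() returns a
        // raw wrapper around `addr`; for grant regions with NO_ADVANCE_MAP it
        // performs the actual mapping now.
        Self {
            slice: MmapXen::mmap(mmap, addr, prot, len),
        }
    }

    pub fn as_ptr(&self) -> *const u8 {
        self.slice.addr()
    }
}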
