
Commit 0b36313

fixup! Add API for getting VM's dirty pages, and add some bitmap utility functions.
1 parent c409f01 commit 0b36313

2 files changed: +23 −53 lines changed

src/hyperlight_host/src/hypervisor/hyperv_linux.rs

Lines changed: 12 additions & 31 deletions
@@ -306,8 +306,12 @@ pub(crate) struct HypervLinuxDriver {
     vm_fd: VmFd,
     vcpu_fd: VcpuFd,
     entrypoint: u64,
+    // Regions that are part of the original sandbox
     mem_regions: Vec<MemoryRegion>,
-    n_initial_regions: usize,
+    // Size of the (contiguous) sandbox mem_regions
+    mem_regions_size: usize,
+    // Regions that are mapped after sandbox creation
+    mmap_regions: Vec<MemoryRegion>,
     orig_rsp: GuestPtr,
     interrupt_handle: Arc<LinuxInterruptHandle>,

@@ -453,8 +457,9 @@ impl HypervLinuxDriver {
             page_size: 0,
             vm_fd,
             vcpu_fd,
-            n_initial_regions: mem_regions.len(),
             mem_regions,
+            mem_regions_size: total_size,
+            mmap_regions: Vec::new(),
             entrypoint: entrypoint_ptr.absolute()?,
             orig_rsp: rsp_ptr,
             interrupt_handle: interrupt_handle.clone(),

@@ -623,16 +628,14 @@ impl Hypervisor for HypervLinuxDriver {
         }
         let mshv_region: mshv_user_mem_region = rgn.to_owned().into();
         self.vm_fd.map_user_memory(mshv_region)?;
-        self.mem_regions.push(rgn.to_owned());
+        self.mmap_regions.push(rgn.to_owned());
         Ok(())
     }

     #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
     unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
-        for rgn in self
-            .mem_regions
-            .split_off(self.mem_regions.len() - n as usize)
-        {
+        let n_keep = self.mmap_regions.len() - n as usize;
+        for rgn in self.mmap_regions.split_off(n_keep) {
             let mshv_region: mshv_user_mem_region = rgn.to_owned().into();
             self.vm_fd.unmap_user_memory(mshv_region)?;
         }
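
The rewritten `unmap_regions` leans on `Vec::split_off`, which truncates the vector to its first `n_keep` elements and returns the tail. A minimal, self-contained sketch of that pattern, with strings standing in for `MemoryRegion` values:

```rust
fn main() {
    // Four dynamically mapped regions; unmap the last n = 2 of them.
    let mut mmap_regions = vec!["rgn0", "rgn1", "rgn2", "rgn3"];
    let n = 2usize;

    let n_keep = mmap_regions.len() - n;
    let unmapped = mmap_regions.split_off(n_keep);

    assert_eq!(mmap_regions, ["rgn0", "rgn1"]); // still mapped
    assert_eq!(unmapped, ["rgn2", "rgn3"]); // each would be passed to unmap_user_memory
}
```
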
@@ -922,31 +925,9 @@ impl Hypervisor for HypervLinuxDriver {
             .to_owned()
             .into();

-        let n_contiguous = self
-            .mem_regions
-            .windows(2)
-            .take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
-            .count()
-            + 1; // +1 because windows(2) gives us n-1 pairs for n regions
-
-        if n_contiguous != self.n_initial_regions {
-            return Err(new_error!(
-                "get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
-                self.n_initial_regions,
-                n_contiguous
-            ));
-        }
-
-        let sandbox_total_size = self
-            .mem_regions
-            .iter()
-            .take(n_contiguous)
-            .map(|r| r.guest_region.len())
-            .sum();
-
         let mut sandbox_dirty_pages = self.vm_fd.get_dirty_log(
             first_mshv_region.guest_pfn,
-            sandbox_total_size,
+            self.mem_regions_size,
             #[cfg(mshv2)]
             CLEAR_DIRTY_BIT_FLAG,
             #[cfg(mshv3)]

@@ -958,7 +939,7 @@ impl Hypervisor for HypervLinuxDriver {
         // TODO: remove this once bug in mshv is fixed. The bug makes it possible
         // for non-mapped memory to incorrectly be marked dirty. To fix this, we just zero out
         // any bits that are not within the sandbox size.
-        let sandbox_pages = sandbox_total_size / self.page_size;
+        let sandbox_pages = self.mem_regions_size / self.page_size;
         let last_block_idx = sandbox_dirty_pages.len().saturating_sub(1);
         if let Some(last_block) = sandbox_dirty_pages.last_mut() {
             let last_block_start_page = last_block_idx * 64;
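
The `sandbox_pages` value feeds the masking step visible at the end of this hunk: bits in the final 64-bit block of the dirty log that lie past the last sandbox page are zeroed, working around the mshv bug noted in the TODO. A self-contained sketch of that masking, assuming the usual convention that bit `i` of block `b` marks page `b * 64 + i` (the function name is illustrative):

```rust
// Clear dirty bits for pages beyond the sandbox, mirroring the last-block
// masking in get_and_clear_dirty_pages. Here `sandbox_pages` plays the role
// of self.mem_regions_size / self.page_size.
fn mask_trailing_bits(dirty: &mut [u64], sandbox_pages: usize) {
    let last_block_idx = dirty.len().saturating_sub(1);
    if let Some(last_block) = dirty.last_mut() {
        let last_block_start_page = last_block_idx * 64;
        let valid_bits = sandbox_pages.saturating_sub(last_block_start_page);
        if valid_bits < 64 {
            // Keep only the low `valid_bits` bits; pages past the sandbox
            // end can never legitimately be dirty.
            *last_block &= (1u64 << valid_bits) - 1;
        }
    }
}

fn main() {
    // 72 sandbox pages: block 1 covers pages 64..128, but only 64..72 exist.
    let mut dirty = vec![u64::MAX, u64::MAX];
    mask_trailing_bits(&mut dirty, 72);
    assert_eq!(dirty[1], 0xff); // only the low 8 bits survive
}
```
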

src/hyperlight_host/src/hypervisor/kvm.rs

Lines changed: 11 additions & 22 deletions
@@ -291,8 +291,10 @@ pub(crate) struct KVMDriver {
     vcpu_fd: VcpuFd,
     entrypoint: u64,
     orig_rsp: GuestPtr,
+    // Regions that are part of the original sandbox
     mem_regions: Vec<MemoryRegion>,
-    n_initial_regions: usize,
+    // Regions that are mapped after sandbox creation
+    mmap_regions: Vec<MemoryRegion>,
     interrupt_handle: Arc<LinuxInterruptHandle>,

     #[cfg(gdb)]

@@ -375,8 +377,8 @@ impl KVMDriver {
             vcpu_fd,
             entrypoint,
             orig_rsp: rsp_gp,
-            n_initial_regions: mem_regions.len(),
             mem_regions,
+            mmap_regions: Vec::new(),
             interrupt_handle: interrupt_handle.clone(),
             #[cfg(gdb)]
             debug,

@@ -507,18 +509,19 @@ impl Hypervisor for KVMDriver {
         }

         let mut kvm_region: kvm_userspace_memory_region = region.clone().into();
-        kvm_region.slot = self.mem_regions.len() as u32;
+        kvm_region.slot = (self.mem_regions.len() + self.mmap_regions.len()) as u32;
         unsafe { self.vm_fd.set_user_memory_region(kvm_region) }?;
-        self.mem_regions.push(region.to_owned());
+        self.mmap_regions.push(region.to_owned());
         Ok(())
     }

     #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
     unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
-        let n_keep = self.mem_regions.len() - n as usize;
-        for (k, region) in self.mem_regions.split_off(n_keep).iter().enumerate() {
+        let n_keep = self.mmap_regions.len() - n as usize;
+        let n_sandbox_regions = self.mem_regions.len();
+        for (k, region) in self.mmap_regions.split_off(n_keep).iter().enumerate() {
             let mut kvm_region: kvm_userspace_memory_region = region.clone().into();
-            kvm_region.slot = (n_keep + k) as u32;
+            kvm_region.slot = (n_sandbox_regions + n_keep + k) as u32;
             // Setting memory_size to 0 unmaps the slot's region
             // From https://docs.kernel.org/virt/kvm/api.html
             // > Deleting a slot is done by passing zero for memory_size.
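
The slot arithmetic is the subtle part of this hunk: sandbox regions permanently occupy KVM slots `0..mem_regions.len()`, dynamically mapped regions continue numbering after them, and unmapping must reconstruct those same slot numbers, now offset by the sandbox region count as well as `n_keep`. A small self-contained sketch of the bookkeeping, with plain integers standing in for the actual KVM calls:

```rust
// Sandbox regions own slots 0..n_sandbox_regions; each later mmap region
// takes the next slot after everything currently mapped.
fn next_mmap_slot(n_sandbox_regions: usize, n_mmap_regions: usize) -> u32 {
    (n_sandbox_regions + n_mmap_regions) as u32
}

fn main() {
    let n_sandbox_regions = 3usize;
    let mut mmap_slots: Vec<u32> = Vec::new();
    for _ in 0..4 {
        mmap_slots.push(next_mmap_slot(n_sandbox_regions, mmap_slots.len()));
    }
    assert_eq!(mmap_slots, [3, 4, 5, 6]);

    // Unmapping the last n = 2 regions must recompute slots 5 and 6.
    let n = 2usize;
    let n_keep = mmap_slots.len() - n;
    for (k, slot) in mmap_slots.split_off(n_keep).into_iter().enumerate() {
        assert_eq!(slot, (n_sandbox_regions + n_keep + k) as u32);
    }
}
```
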
@@ -756,25 +759,11 @@ impl Hypervisor for KVMDriver {

     // TODO: Implement getting additional host-mapped dirty pages.
     fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
-        let n_contiguous = self
-            .mem_regions
-            .windows(2)
-            .take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
-            .count()
-            + 1; // +1 because windows(2) gives us n-1 pairs for n regions
-
-        if n_contiguous != self.n_initial_regions {
-            return Err(new_error!(
-                "get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
-                self.n_initial_regions,
-                n_contiguous
-            ));
-        }
         let mut page_indices = vec![];
         let mut current_page = 0;

         // Iterate over all memory regions and get the dirty pages for each region ignoring guard pages which cannot be dirty
-        for (i, mem_region) in self.mem_regions.iter().take(n_contiguous).enumerate() {
+        for (i, mem_region) in self.mem_regions.iter().enumerate() {
             let num_pages = mem_region.guest_region.len() / PAGE_SIZE_USIZE;
             let bitmap = match mem_region.flags {
                 MemoryRegionFlags::READ => {
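
The surviving loop walks the sandbox's contiguous regions in guest order, keeping a running page offset so per-region dirty bits can be reported as absolute page indices, while skipping guard regions, which can never be dirty. A hedged sketch of that shape, with a stand-in `Region` type and a hypothetical `dirty_pages_in_region` closure in place of the real per-region dirty-log query:

```rust
// Stand-in for hyperlight's MemoryRegion: a page count plus a flag that says
// whether this is a guard region (guard pages can never be dirty).
struct Region {
    num_pages: usize,
    is_guard: bool,
}

// Collect absolute dirty-page indices across contiguous regions. The closure
// is a hypothetical stand-in for the per-region KVM dirty-log call.
fn collect_dirty(
    regions: &[Region],
    dirty_pages_in_region: impl Fn(usize) -> Vec<usize>,
) -> Vec<usize> {
    let mut page_indices = vec![];
    let mut current_page = 0;
    for (i, region) in regions.iter().enumerate() {
        if !region.is_guard {
            for page in dirty_pages_in_region(i) {
                page_indices.push(current_page + page);
            }
        }
        // Guard regions still advance the offset: their pages occupy guest
        // address space even though they are never queried.
        current_page += region.num_pages;
    }
    page_indices
}

fn main() {
    let regions = [
        Region { num_pages: 4, is_guard: false },
        Region { num_pages: 2, is_guard: true },
        Region { num_pages: 4, is_guard: false },
    ];
    // Pretend page 1 of region 0 and page 0 of region 2 are dirty.
    let dirty = collect_dirty(&regions, |i| match i {
        0 => vec![1],
        2 => vec![0],
        _ => vec![],
    });
    assert_eq!(dirty, [1, 6]); // region 2 starts at absolute page 4 + 2 = 6
}
```
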
