4 changes: 4 additions & 0 deletions src/vmm/Cargo.toml
@@ -70,5 +70,9 @@ harness = false
 name = "block_request"
 harness = false
 
+[[bench]]
+name = "memory_access"
+harness = false
+
 [lints]
 workspace = true
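In the stanza above, `harness = false` disables the default libtest harness for this target so that criterion can supply its own `main` through the `criterion_main!` macro in the benchmark file; the new benchmark can then be run on its own with `cargo bench --bench memory_access`.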
70 changes: 70 additions & 0 deletions src/vmm/benches/memory_access.rs
@@ -0,0 +1,70 @@
+// Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
+use vm_memory::{GuestAddress, GuestMemory};
+use vmm::resources::VmResources;
+use vmm::vmm_config::machine_config::{HugePageConfig, VmConfig};
+
+fn bench_single_page_fault(c: &mut Criterion, configuration: VmResources) {
+    c.bench_function("page_fault", |b| {
+        b.iter_batched(
+            || {
+                let memory = configuration.allocate_guest_memory().unwrap();
+                let ptr = memory
+                    .get_slice(GuestAddress(0), 1)
+                    .unwrap()
+                    .ptr_guard_mut()
+                    .as_ptr();
+
+                // It is fine to return both: `ptr` is a raw pointer, not a reference
+                // into `memory`, so no self-referential struct is created here.
+                (memory, ptr)
+            },
+            |(_, ptr)| unsafe {
+                // Cause a single page fault.
+                ptr.write_volatile(1);
+            },
+            BatchSize::SmallInput,
+        )
+    });
+}
+
+pub fn bench_4k_page_fault(c: &mut Criterion) {
+    bench_single_page_fault(
+        c,
+        VmResources {
+            vm_config: VmConfig {
+                vcpu_count: 1,
+                mem_size_mib: 2,
+                ..Default::default()
+            },
+            ..Default::default()
+        },
+    )
+}
+
+pub fn bench_2m_page_fault(c: &mut Criterion) {
+    bench_single_page_fault(
+        c,
+        VmResources {
+            vm_config: VmConfig {
+                vcpu_count: 1,
+                mem_size_mib: 2,
+                huge_pages: HugePageConfig::Hugetlbfs2M,
+                ..Default::default()
+            },
+            ..Default::default()
+        },
+    )
+}
+
+criterion_group! {
+    name = memory_access_benches;
+    config = Criterion::default().noise_threshold(0.05);
+    targets = bench_4k_page_fault, bench_2m_page_fault
+}
+
+criterion_main! {
+    memory_access_benches
+}
40 changes: 5 additions & 35 deletions src/vmm/src/builder.rs
@@ -64,7 +64,7 @@ use crate::utils::u64_to_usize;
 use crate::vmm_config::boot_source::BootConfig;
 use crate::vmm_config::instance_info::InstanceInfo;
 use crate::vmm_config::machine_config::{VmConfig, VmConfigError};
-use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap};
+use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
 use crate::vstate::vcpu::{Vcpu, VcpuConfig, VcpuError};
 use crate::vstate::vm::Vm;
 use crate::{device_manager, EventManager, Vmm, VmmError};
@@ -252,39 +252,9 @@ pub fn build_microvm_for_boot(
         .boot_source_builder()
         .ok_or(MissingKernelConfig)?;
 
-    let track_dirty_pages = vm_resources.track_dirty_pages();
-
-    let vhost_user_device_used = vm_resources
-        .block
-        .devices
-        .iter()
-        .any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
-
-    // Page faults are more expensive for shared memory mapping, including memfd.
-    // For this reason, we only back guest memory with a memfd
-    // if a vhost-user-blk device is configured in the VM, otherwise we fall back to
-    // an anonymous private memory.
-    //
-    // The vhost-user-blk branch is not currently covered by integration tests in Rust,
-    // because that would require running a backend process. If in the future we converge to
-    // a single way of backing guest memory for vhost-user and non-vhost-user cases,
-    // that would not be worth the effort.
-    let guest_memory = if vhost_user_device_used {
-        GuestMemoryMmap::memfd_backed(
-            vm_resources.vm_config.mem_size_mib,
-            track_dirty_pages,
-            vm_resources.vm_config.huge_pages,
-        )
-        .map_err(StartMicrovmError::GuestMemory)?
-    } else {
-        let regions = crate::arch::arch_memory_regions(vm_resources.vm_config.mem_size_mib << 20);
-        GuestMemoryMmap::from_raw_regions(
-            &regions,
-            track_dirty_pages,
-            vm_resources.vm_config.huge_pages,
-        )
-        .map_err(StartMicrovmError::GuestMemory)?
-    };
+    let guest_memory = vm_resources
+        .allocate_guest_memory()
+        .map_err(StartMicrovmError::GuestMemory)?;
 
     let entry_addr = load_kernel(boot_config, &guest_memory)?;
     let initrd = load_initrd_from_config(boot_config, &guest_memory)?;
@@ -299,7 +269,7 @@
         event_manager,
         guest_memory,
         None,
-        track_dirty_pages,
+        vm_resources.vm_config.track_dirty_pages,
         vm_resources.vm_config.vcpu_count,
         cpu_template.kvm_capabilities.clone(),
     )?;
42 changes: 37 additions & 5 deletions src/vmm/src/resources.rs
@@ -28,6 +28,7 @@ use crate::vmm_config::metrics::{init_metrics, MetricsConfig, MetricsConfigError
 use crate::vmm_config::mmds::{MmdsConfig, MmdsConfigError};
 use crate::vmm_config::net::*;
 use crate::vmm_config::vsock::*;
+use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryMmap, MemoryError};
 
 /// Errors encountered when configuring microVM resources.
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
@@ -234,11 +235,6 @@ impl VmResources {
         Ok(())
     }
 
-    /// Returns whether dirty page tracking is enabled or not.
-    pub fn track_dirty_pages(&self) -> bool {
-        self.vm_config.track_dirty_pages
-    }
-
     /// Add a custom CPU template to the VM resources
     /// to configure vCPUs.
     pub fn set_custom_cpu_template(&mut self, cpu_template: CustomCpuTemplate) {
@@ -473,6 +469,42 @@ impl VmResources {
 
         Ok(())
     }
+
+    /// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
+    ///
+    /// If vhost-user-blk devices are in use, this allocates memfd-backed shared memory;
+    /// otherwise it prefers anonymous private memory for performance reasons.
+    pub fn allocate_guest_memory(&self) -> Result<GuestMemoryMmap, MemoryError> {
+        let vhost_user_device_used = self
+            .block
+            .devices
+            .iter()
+            .any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
+
+        // Page faults are more expensive for shared memory mappings, including
+        // memfd-backed ones. For this reason, we only back guest memory with a
+        // memfd if a vhost-user-blk device is configured in the VM; otherwise we
+        // fall back to anonymous private memory.
+        //
+        // The vhost-user-blk branch is not currently covered by integration tests
+        // in Rust, because that would require running a backend process. If in the
+        // future we converge to a single way of backing guest memory for vhost-user
+        // and non-vhost-user cases, adding such a test would not be worth the effort.
+        if vhost_user_device_used {
+            GuestMemoryMmap::memfd_backed(
+                self.vm_config.mem_size_mib,
+                self.vm_config.track_dirty_pages,
+                self.vm_config.huge_pages,
+            )
+        } else {
+            let regions = crate::arch::arch_memory_regions(self.vm_config.mem_size_mib << 20);
+            GuestMemoryMmap::from_raw_regions(
+                &regions,
+                self.vm_config.track_dirty_pages,
+                self.vm_config.huge_pages,
+            )
+        }
+    }
 }
 
 impl From<&VmResources> for VmmConfig {
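The comment in `allocate_guest_memory` is the crux of the refactor: a shared, memfd-backed mapping is what lets a separate vhost-user backend process map the same guest memory, but its page faults go through the shmem path and are slower than faulting in private anonymous pages. A rough sketch of the two mapping flavors involved, using the `libc` crate directly (illustrative only; the real work happens inside `GuestMemoryMmap::memfd_backed` and `from_raw_regions`):

```rust
use std::ptr;

/// Shared, memfd-backed mapping: the fd can be sent to a vhost-user backend
/// over a unix socket so both processes see the same guest memory.
unsafe fn map_shared_memfd(len: usize) -> *mut libc::c_void {
    let fd = libc::memfd_create(b"guest_mem\0".as_ptr().cast(), 0);
    assert!(fd >= 0);
    assert_eq!(libc::ftruncate(fd, len as libc::off_t), 0);
    libc::mmap(
        ptr::null_mut(),
        len,
        libc::PROT_READ | libc::PROT_WRITE,
        libc::MAP_SHARED,
        fd,
        0,
    )
}

/// Private anonymous mapping: no backing file and cheaper page faults, but
/// nothing another process could map.
unsafe fn map_private_anonymous(len: usize) -> *mut libc::c_void {
    libc::mmap(
        ptr::null_mut(),
        len,
        libc::PROT_READ | libc::PROT_WRITE,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
        -1,
        0,
    )
}
```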
2 changes: 1 addition & 1 deletion src/vmm/src/rpc_interface.rs
@@ -755,7 +755,7 @@ impl RuntimeApiController {
         log_dev_preview_warning("Virtual machine snapshots", None);
 
         if create_params.snapshot_type == SnapshotType::Diff
-            && !self.vm_resources.track_dirty_pages()
+            && !self.vm_resources.vm_config.track_dirty_pages
         {
             return Err(VmmActionError::NotSupported(
                 "Diff snapshots are not allowed on uVMs with dirty page tracking disabled."
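This check exists because a Diff snapshot serializes only the pages dirtied since the last snapshot, and that set is only known if dirty page tracking was enabled when the microVM was configured; with tracking disabled, only Full snapshots are possible.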