Skip to content

Commit eb53096

Browse files
committed
add function to VmResources for allocating I/O memory
If swiotlb is requested through the API, this function will allocate a single contiguous region intended for swiotlb use. It needs to be contiguous because we must be able to describe it using a single memory range in FDT, as devices cannot be assigned to reserved memory consisting of multiple regions (e.g. FDT assignment is to memory regions, not to #reserved-memory nodes). While we're at it, always use anon memory for the "normal" part of guest memory if swiotlb is enabled, since with swiotlb enabled we know that vhost devices will never need to access that part of memory. Signed-off-by: Patrick Roy <[email protected]>
1 parent 9d758f1 commit eb53096

File tree

4 files changed

+89
-22
lines changed

4 files changed

+89
-22
lines changed

src/vmm/src/arch/aarch64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
8989
)]
9090
}
9191

92+
/// How many bytes of physical guest memory are addressable before the final gap in
93+
/// the address space on this architecture.
94+
///
95+
/// There are no architectural gaps in the physical address space on aarch64, so this is 0.
96+
pub fn bytes_before_last_gap() -> usize {
97+
0
98+
}
99+
92100
/// Configures the system for booting Linux.
93101
pub fn configure_system_for_boot(
94102
vmm: &mut Vmm,

src/vmm/src/arch/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ pub use aarch64::vcpu::*;
2020
pub use aarch64::vm::{ArchVm, ArchVmError, VmState};
2121
#[cfg(target_arch = "aarch64")]
2222
pub use aarch64::{
23-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions,
23+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
2424
configure_system_for_boot, get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE,
2525
layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
2626
load_kernel,
@@ -39,7 +39,7 @@ pub use x86_64::vm::{ArchVm, ArchVmError, VmState};
3939

4040
#[cfg(target_arch = "x86_64")]
4141
pub use crate::arch::x86_64::{
42-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions,
42+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
4343
configure_system_for_boot, get_kernel_start, initrd_load_addr, layout::APIC_ADDR,
4444
layout::CMDLINE_MAX_SIZE, layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX,
4545
layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START, load_kernel,

src/vmm/src/arch/x86_64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
140140
}
141141
}
142142

143+
/// How many bytes of physical guest memory are addressable before the final gap in
144+
/// the address space on this architecture.
145+
///
146+
/// On x86_64, this is the number of bytes that fit before the MMIO gap.
147+
pub fn bytes_before_last_gap() -> usize {
148+
u64_to_usize(MMIO_MEM_START)
149+
}
150+
143151
/// Returns the memory address where the kernel could be loaded.
144152
pub fn get_kernel_start() -> u64 {
145153
layout::HIMEM_START

src/vmm/src/resources.rs

Lines changed: 71 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -437,29 +437,39 @@ impl VmResources {
437437
Ok(())
438438
}
439439

440-
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
441-
///
442-
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
443-
/// prefers anonymous memory for performance reasons.
444-
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
445-
let vhost_user_device_used = self
446-
.block
440+
/// Returns true if any vhost-user devices are configured in this [`VmResources`] object.
441+
pub fn vhost_user_devices_used(&self) -> bool {
442+
self.block
447443
.devices
448444
.iter()
449-
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
445+
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user())
446+
}
450447

451-
// Page faults are more expensive for shared memory mapping, including memfd.
452-
// For this reason, we only back guest memory with a memfd
453-
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
454-
// an anonymous private memory.
455-
//
456-
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
457-
// because that would require running a backend process. If in the future we converge to
458-
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
459-
// that would not be worth the effort.
460-
let regions =
461-
crate::arch::arch_memory_regions(0, mib_to_bytes(self.machine_config.mem_size_mib));
462-
if vhost_user_device_used {
448+
/// The size of the swiotlb region requested, in MiB.
449+
#[cfg(target_arch = "aarch64")]
450+
pub fn swiotlb_size_mib(&self) -> usize {
451+
self.machine_config.mem_config.initial_swiotlb_size
452+
}
453+
454+
/// The size of the swiotlb region requested, in MiB.
///
/// Always 0: the swiotlb config option is only read by the cfg-gated aarch64 variant,
/// so no swiotlb region is ever allocated on x86_64.
455+
#[cfg(target_arch = "x86_64")]
456+
pub fn swiotlb_size_mib(&self) -> usize {
457+
0
458+
}
459+
460+
/// Whether the use of a swiotlb region was requested.
461+
pub fn swiotlb_used(&self) -> bool {
462+
self.swiotlb_size_mib() > 0
463+
}
464+
465+
fn allocate_memory(
466+
&self,
467+
offset: usize,
468+
size: usize,
469+
vhost_accessible: bool,
470+
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
471+
let regions = crate::arch::arch_memory_regions(offset, size);
472+
if vhost_accessible {
463473
memory::memfd_backed(
464474
regions.as_ref(),
465475
self.machine_config.track_dirty_pages,
@@ -473,6 +483,47 @@ impl VmResources {
473483
)
474484
}
475485
}
486+
487+
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
488+
///
489+
/// If vhost-user-blk devices are in use and no swiotlb region is configured, allocates
490+
/// memfd-backed shared memory; otherwise prefers anonymous memory for performance reasons.
491+
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
492+
// Page faults are more expensive for shared memory mapping, including memfd.
493+
// For this reason, we only back guest memory with a memfd
494+
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
495+
// an anonymous private memory.
496+
//
497+
// Note that if a swiotlb region is used, no I/O will go through the "regular"
498+
// memory regions, and we can back them with anon memory regardless.
499+
//
500+
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
501+
// because that would require running a backend process. If in the future we converge to
502+
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
503+
// that would not be worth the effort.
504+
self.allocate_memory(
505+
0,
506+
mib_to_bytes(self.machine_config.mem_size_mib - self.swiotlb_size_mib()),
507+
self.vhost_user_devices_used() && !self.swiotlb_used(),
508+
)
509+
}
510+
511+
/// Allocates the dedicated I/O region for swiotlb use, if one was requested.
///
/// The region must be a single contiguous range, because it has to be describable by a
/// single memory range in the FDT (devices cannot be assigned reserved memory that
/// consists of multiple regions).
512+
pub fn allocate_swiotlb_region(&self) -> Result<Option<GuestRegionMmap>, MemoryError> {
513+
if !self.swiotlb_used() {
514+
return Ok(None);
515+
}
516+
517+
let swiotlb_size = mib_to_bytes(self.swiotlb_size_mib());
518+
// Carve the region out of the very top of guest memory...
let start = mib_to_bytes(self.machine_config.mem_size_mib) - swiotlb_size;
519+
// ...but never let it begin before the final architectural gap, so that
// allocate_memory() below cannot end up splitting it across the gap.
let start = start.max(crate::arch::bytes_before_last_gap());
520+
521+
let mut mem = self.allocate_memory(start, swiotlb_size, self.vhost_user_devices_used())?;
522+
523+
// Invariant: starting past the last gap means arch_memory_regions() returned exactly
// one region — the swiotlb region must stay contiguous (see doc comment above).
assert_eq!(mem.len(), 1);
524+
525+
Ok(Some(mem.remove(0)))
526+
}
476527
}
477528

478529
impl From<&VmResources> for VmmConfig {

0 commit comments

Comments
 (0)