Skip to content

Commit 388dc69

Browse files
committed
add function to VmResources for allocating I/O memory
If swiotlb is requested through the API, this function will allocate a single contiguous region intended for swiotlb use. It needs to be contiguous because we must be able to describe it using a single memory range in FDT, as devices cannot be assigned to reserved memory consisting of multiple regions (e.g. FDT assignment is to memory regions, not to #reserved-memory nodes). While we're at it, always use anon memory for the "normal" part of guest memory if swiotlb is enabled, since, if swiotlb is enabled, we know that vhost devices will never need to access that part of memory. Signed-off-by: Patrick Roy <[email protected]>
1 parent d5c56b2 commit 388dc69

File tree

4 files changed

+91
-24
lines changed

4 files changed

+91
-24
lines changed

src/vmm/src/arch/aarch64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
5555
)]
5656
}
5757

/// How many bytes of physical guest memory are addressable before the final gap in
/// the address space on this architecture.
///
/// There are no architectural gaps in the physical address space on aarch64, so this is 0.
pub fn bytes_before_last_gap() -> usize {
    0
}
65+
5866
/// Configures the system and should be called once per vm before starting vcpu threads.
5967
/// For aarch64, we only setup the FDT.
6068
///

src/vmm/src/arch/mod.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,9 @@ pub use aarch64::vcpu::*;
2020
pub use aarch64::vm::{ArchVm, ArchVmError, VmState};
2121
#[cfg(target_arch = "aarch64")]
2222
pub use aarch64::{
23-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
24-
get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE, layout::IRQ_BASE,
25-
layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
23+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
24+
configure_system, get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE,
25+
layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
2626
};
2727

2828
/// Module for x86_64 related functionality.
@@ -40,7 +40,7 @@ pub use x86_64::{
4040
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
4141
get_kernel_start, initrd_load_addr, layout::APIC_ADDR, layout::CMDLINE_MAX_SIZE,
4242
layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE,
43-
layout::SYSTEM_MEM_START,
43+
layout::SYSTEM_MEM_START, bytes_before_last_gap
4444
};
4545

4646
/// Types of devices that can get attached to this platform.

src/vmm/src/arch/x86_64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
104104
}
105105
}
106106

107+
/// How many bytes of physical guest memory are addressible before the final gap in
108+
/// the address space on this architecture.
109+
///
110+
/// On x86_64, this is the number of bytes that fit before the MMIO gap.
111+
pub fn bytes_before_last_gap() -> usize {
112+
u64_to_usize(MMIO_MEM_START)
113+
}
114+
107115
/// Returns the memory address where the kernel could be loaded.
108116
pub fn get_kernel_start() -> u64 {
109117
layout::HIMEM_START

src/vmm/src/resources.rs

Lines changed: 71 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -437,29 +437,39 @@ impl VmResources {
437437
Ok(())
438438
}
439439

440-
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
441-
///
442-
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
443-
/// prefers anonymous memory for performance reasons.
444-
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
445-
let vhost_user_device_used = self
446-
.block
440+
/// Returns true if any vhost user devices are configured int his [`VmResources`] object
441+
pub fn vhost_user_devices_used(&self) -> bool {
442+
self.block
447443
.devices
448444
.iter()
449-
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
445+
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user())
446+
}
450447

451-
// Page faults are more expensive for shared memory mapping, including memfd.
452-
// For this reason, we only back guest memory with a memfd
453-
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
454-
// an anonymous private memory.
455-
//
456-
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
457-
// because that would require running a backend process. If in the future we converge to
458-
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
459-
// that would not be worth the effort.
460-
let regions =
461-
crate::arch::arch_memory_regions(0, mib_to_bytes(self.machine_config.mem_size_mib));
462-
if vhost_user_device_used {
448+
/// The size of the swiotlb region requested, in MiB
449+
#[cfg(target_arch = "aarch64")]
450+
pub fn swiotlb_size_mib(&self) -> usize {
451+
self.machine_config.mem_config.initial_swiotlb_size
452+
}
453+
454+
/// The size of the swiotlb region requested, in MiB
455+
#[cfg(target_arch = "x86_64")]
456+
pub fn swiotlb_size_mib(&self) -> usize {
457+
0
458+
}
459+
460+
/// Whether the use of swiotlb was requested
461+
pub fn swiotlb_used(&self) -> bool {
462+
self.swiotlb_size_mib() > 0
463+
}
464+
465+
fn allocate_memory(
466+
&self,
467+
offset: usize,
468+
size: usize,
469+
vhost_accessible: bool,
470+
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
471+
let regions = crate::arch::arch_memory_regions(offset, size);
472+
if vhost_accessible {
463473
memory::memfd_backed(
464474
regions.as_ref(),
465475
self.machine_config.track_dirty_pages,
@@ -473,6 +483,47 @@ impl VmResources {
473483
)
474484
}
475485
}
486+
487+
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
488+
///
489+
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
490+
/// prefers anonymous memory for performance reasons.
491+
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
492+
// Page faults are more expensive for shared memory mapping, including memfd.
493+
// For this reason, we only back guest memory with a memfd
494+
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
495+
// an anonymous private memory.
496+
//
497+
// Note that if a swiotlb region is used, no I/O will go through the "regular"
498+
// memory regions, and we can back them with anon memory regardless.
499+
//
500+
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
501+
// because that would require running a backend process. If in the future we converge to
502+
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
503+
// that would not be worth the effort.
504+
self.allocate_memory(
505+
0,
506+
mib_to_bytes(self.machine_config.mem_size_mib - self.swiotlb_size_mib()),
507+
self.vhost_user_devices_used() && !self.swiotlb_used(),
508+
)
509+
}
510+
511+
/// Allocates the dedicated I/O region for swiotlb use, if one was requested.
512+
pub fn allocate_io_memory(&self) -> Result<Option<GuestRegionMmap>, MemoryError> {
513+
if !self.swiotlb_used() {
514+
return Ok(None);
515+
}
516+
517+
let swiotlb_size = mib_to_bytes(self.swiotlb_size_mib());
518+
let start = mib_to_bytes(self.machine_config.mem_size_mib) - swiotlb_size;
519+
let start = start.max(crate::arch::bytes_before_last_gap());
520+
521+
let mut mem = self.allocate_memory(start, swiotlb_size, self.vhost_user_devices_used())?;
522+
523+
assert_eq!(mem.len(), 1);
524+
525+
Ok(Some(mem.remove(0)))
526+
}
476527
}
477528

478529
impl From<&VmResources> for VmmConfig {

0 commit comments

Comments
 (0)