Skip to content

Commit ebad30b

Browse files
committed
add function to VmResources for allocating I/O memory
If swiotlb is requested through the API, this function will allocate a single contiguous region intended for swiotlb use. It needs to be contiguous because we must be able to describe it using a single memory range in FDT, as devices cannot be assigned to reserved memory consisting of multiple regions (e.g. FDT assignment is to memory regions, not to #reserved-memory nodes). While we're at it, always use anon memory for the "normal" part of guest memory if swiotlb is enabled, as if swiotlb is enabled we know that vhost devices will never need to access that part of memory. Signed-off-by: Patrick Roy <[email protected]>
1 parent 0260c2d commit ebad30b

File tree

5 files changed

+96
-28
lines changed

5 files changed

+96
-28
lines changed

src/vmm/src/arch/aarch64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
5151
)]
5252
}
5353

54+
/// How many bytes of physical guest memory are addressable before the final gap in
55+
/// the address space on this architecture.
56+
///
57+
/// There are no architectural gaps in the physical address space on aarch64, so this is 0
58+
pub fn bytes_before_last_gap() -> usize {
59+
0
60+
}
61+
5462
/// Configures the system and should be called once per vm before starting vcpu threads.
5563
/// For aarch64, we only setup the FDT.
5664
///

src/vmm/src/arch/mod.rs

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ pub mod aarch64;
1414

1515
#[cfg(target_arch = "aarch64")]
1616
pub use aarch64::{
17-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
18-
get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE, layout::IRQ_BASE,
19-
layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
17+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
18+
configure_system, get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE,
19+
layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
2020
};
2121

2222
/// Module for x86_64 related functionality.
@@ -25,10 +25,10 @@ pub mod x86_64;
2525

2626
#[cfg(target_arch = "x86_64")]
2727
pub use crate::arch::x86_64::{
28-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
29-
get_kernel_start, initrd_load_addr, layout::APIC_ADDR, layout::CMDLINE_MAX_SIZE,
30-
layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE,
31-
layout::SYSTEM_MEM_START,
28+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
29+
configure_system, get_kernel_start, initrd_load_addr, layout::APIC_ADDR,
30+
layout::CMDLINE_MAX_SIZE, layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX,
31+
layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
3232
};
3333

3434
/// Types of devices that can get attached to this platform.

src/vmm/src/arch/x86_64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
9797
}
9898
}
9999

100+
/// How many bytes of physical guest memory are addressable before the final gap in
101+
/// the address space on this architecture.
102+
///
103+
/// On x86_64, this is the number of bytes that fit before the MMIO gap.
104+
pub fn bytes_before_last_gap() -> usize {
105+
u64_to_usize(MMIO_MEM_START)
106+
}
107+
100108
/// Returns the memory address where the kernel could be loaded.
101109
pub fn get_kernel_start() -> u64 {
102110
layout::HIMEM_START

src/vmm/src/resources.rs

Lines changed: 71 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -437,28 +437,39 @@ impl VmResources {
437437
Ok(())
438438
}
439439

440-
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
441-
///
442-
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
443-
/// prefers anonymous memory for performance reasons.
444-
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
445-
let vhost_user_device_used = self
446-
.block
440+
/// Returns true if any vhost user devices are configured in this [`VmResources`] object
441+
pub fn vhost_user_devices_used(&self) -> bool {
442+
self.block
447443
.devices
448444
.iter()
449-
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
445+
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user())
446+
}
450447

451-
// Page faults are more expensive for shared memory mapping, including memfd.
452-
// For this reason, we only back guest memory with a memfd
453-
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
454-
// an anonymous private memory.
455-
//
456-
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
457-
// because that would require running a backend process. If in the future we converge to
458-
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
459-
// that would not be worth the effort.
460-
let regions = crate::arch::arch_memory_regions(0, self.machine_config.mem_size_mib << MIB_TO_BYTES_SHIFT);
461-
if vhost_user_device_used {
448+
/// The size of the swiotlb region requested, in MiB
449+
#[cfg(target_arch = "aarch64")]
450+
pub fn swiotlb_size_mib(&self) -> usize {
451+
self.machine_config.mem_config.initial_swiotlb_size
452+
}
453+
454+
/// The size of the swiotlb region requested, in MiB
455+
#[cfg(target_arch = "x86_64")]
456+
pub fn swiotlb_size_mib(&self) -> usize {
457+
0
458+
}
459+
460+
/// Whether the use of swiotlb was requested
461+
pub fn swiotlb_used(&self) -> bool {
462+
self.swiotlb_size_mib() > 0
463+
}
464+
465+
fn allocate_memory(
466+
&self,
467+
offset: usize,
468+
size: usize,
469+
vhost_accessible: bool,
470+
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
471+
let regions = crate::arch::arch_memory_regions(offset, size);
472+
if vhost_accessible {
462473
memory::memfd_backed(
463474
regions.as_ref(),
464475
self.machine_config.track_dirty_pages,
@@ -472,6 +483,47 @@ impl VmResources {
472483
)
473484
}
474485
}
486+
487+
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
488+
///
489+
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
490+
/// prefers anonymous memory for performance reasons.
491+
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
492+
// Page faults are more expensive for shared memory mapping, including memfd.
493+
// For this reason, we only back guest memory with a memfd
494+
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
495+
// an anonymous private memory.
496+
//
497+
// Note that if a swiotlb region is used, no I/O will go through the "regular"
498+
// memory regions, and we can back them with anon memory regardless.
499+
//
500+
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
501+
// because that would require running a backend process. If in the future we converge to
502+
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
503+
// that would not be worth the effort.
504+
self.allocate_memory(
505+
0,
506+
(self.machine_config.mem_size_mib - self.swiotlb_size_mib()) << MIB_TO_BYTES_SHIFT,
507+
self.vhost_user_devices_used() && !self.swiotlb_used(),
508+
)
509+
}
510+
511+
/// Allocates the dedicated I/O region for swiotlb use, if one was requested.
512+
pub fn allocate_io_memory(&self) -> Result<Option<GuestRegionMmap>, MemoryError> {
513+
if !self.swiotlb_used() {
514+
return Ok(None);
515+
}
516+
517+
let swiotlb_size = self.swiotlb_size_mib() << MIB_TO_BYTES_SHIFT;
518+
let start = (self.machine_config.mem_size_mib << MIB_TO_BYTES_SHIFT) - swiotlb_size;
519+
let start = start.max(crate::arch::bytes_before_last_gap());
520+
521+
let mut mem = self.allocate_memory(start, swiotlb_size, self.vhost_user_devices_used())?;
522+
523+
assert_eq!(mem.len(), 1);
524+
525+
Ok(Some(mem.remove(0)))
526+
}
475527
}
476528

477529
impl From<&VmResources> for VmmConfig {

src/vmm/src/vstate/memory.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ use vm_memory::{Error as VmMemoryError, GuestMemoryError, WriteVolatile};
2121
use vmm_sys_util::errno;
2222

2323
use crate::DirtyBitmap;
24-
use crate::utils::{MIB_TO_BYTES_SHIFT, get_page_size, u64_to_usize};
24+
use crate::utils::{get_page_size, u64_to_usize};
2525
use crate::vmm_config::machine_config::HugePageConfig;
2626

2727
/// Type of GuestMemoryMmap.
@@ -351,7 +351,7 @@ mod tests {
351351

352352
use super::*;
353353
use crate::snapshot::Snapshot;
354-
use crate::utils::get_page_size;
354+
use crate::utils::{get_page_size, KIB_TO_BYTES_SHIFT};
355355

356356
#[test]
357357
fn test_anonymous() {

0 commit comments

Comments
 (0)