Skip to content

Commit b1e4f15

Browse files
committed
add function to VmResources for allocating I/O memory
If swiotlb is requested through the API, this function will allocate a single contiguous region intended for swiotlb use. It needs to be contiguous because we must be able to describe it using a single memory range in the FDT, as devices cannot be assigned to reserved memory consisting of multiple regions (e.g. FDT assignment is to memory regions, not to #reserved-memory nodes). While we're at it, always use anonymous memory for the "normal" part of guest memory if swiotlb is enabled, since in that case we know that vhost devices will never need to access that part of memory. Signed-off-by: Patrick Roy <[email protected]>
1 parent 3d03c7b commit b1e4f15

File tree

4 files changed

+93
-28
lines changed

4 files changed

+93
-28
lines changed

src/vmm/src/arch/aarch64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
5151
)]
5252
}
5353

/// How many bytes of physical guest memory are addressable before the final gap in
/// the address space on this architecture.
///
/// There are no architectural gaps in the physical address space on aarch64, so this is 0
pub fn bytes_before_last_gap() -> usize {
    0
}
61+
5462
/// Configures the system and should be called once per vm before starting vcpu threads.
5563
/// For aarch64, we only setup the FDT.
5664
///

src/vmm/src/arch/mod.rs

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ pub mod aarch64;
1414

1515
#[cfg(target_arch = "aarch64")]
1616
pub use aarch64::{
17-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
18-
get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE, layout::IRQ_BASE,
19-
layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
17+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
18+
configure_system, get_kernel_start, initrd_load_addr, layout::CMDLINE_MAX_SIZE,
19+
layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
2020
};
2121

2222
/// Module for x86_64 related functionality.
@@ -25,10 +25,10 @@ pub mod x86_64;
2525

2626
#[cfg(target_arch = "x86_64")]
2727
pub use crate::arch::x86_64::{
28-
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, configure_system,
29-
get_kernel_start, initrd_load_addr, layout::APIC_ADDR, layout::CMDLINE_MAX_SIZE,
30-
layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX, layout::SYSTEM_MEM_SIZE,
31-
layout::SYSTEM_MEM_START,
28+
ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions, bytes_before_last_gap,
29+
configure_system, get_kernel_start, initrd_load_addr, layout::APIC_ADDR,
30+
layout::CMDLINE_MAX_SIZE, layout::IOAPIC_ADDR, layout::IRQ_BASE, layout::IRQ_MAX,
31+
layout::SYSTEM_MEM_SIZE, layout::SYSTEM_MEM_START,
3232
};
3333

3434
/// Types of devices that can get attached to this platform.

src/vmm/src/arch/x86_64/mod.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,14 @@ pub fn arch_memory_regions(offset: usize, size: usize) -> Vec<(GuestAddress, usi
9797
}
9898
}
9999

100+
/// How many bytes of physical guest memory are addressible before the final gap in
101+
/// the address space on this architecture.
102+
///
103+
/// On x86_64, this is the number of bytes that fit before the MMIO gap.
104+
pub fn bytes_before_last_gap() -> usize {
105+
u64_to_usize(MMIO_MEM_START)
106+
}
107+
100108
/// Returns the memory address where the kernel could be loaded.
101109
pub fn get_kernel_start() -> u64 {
102110
layout::HIMEM_START

src/vmm/src/resources.rs

Lines changed: 70 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -437,43 +437,92 @@ impl VmResources {
437437
Ok(())
438438
}
439439

440+
/// Returns true if any vhost user devices are configured int his [`VmResources`] object
441+
pub fn vhost_user_devices_used(&self) -> bool {
442+
self.block
443+
.devices
444+
.iter()
445+
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user())
446+
}
447+
/// The size of the swiotlb region requested, in MiB
#[cfg(target_arch = "aarch64")]
pub fn swiotlb_size_mib(&self) -> usize {
    // Read straight from the memory configuration supplied through the API.
    let mem_config = &self.machine_config.mem_config;
    mem_config.initial_swiotlb_size
}
453+
/// The size of the swiotlb region requested, in MiB
///
/// Always 0 on x86_64: no swiotlb region is ever requested on this architecture,
/// so `swiotlb_used()` is always false here.
#[cfg(target_arch = "x86_64")]
pub fn swiotlb_size_mib(&self) -> usize {
    0
}
459+
460+
/// Whether the use of swiotlb was requested
461+
pub fn swiotlb_used(&self) -> bool {
462+
self.swiotlb_size_mib() > 0
463+
}
464+
465+
fn allocate_memory(
466+
&self,
467+
offset: usize,
468+
size: usize,
469+
vhost_accessible: bool,
470+
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
471+
let regions = crate::arch::arch_memory_regions(offset, size);
472+
if vhost_accessible {
473+
memory::memfd_backed(
474+
regions.as_ref(),
475+
self.machine_config.track_dirty_pages,
476+
self.machine_config.huge_pages,
477+
)
478+
} else {
479+
memory::anonymous(
480+
regions.into_iter(),
481+
self.machine_config.track_dirty_pages,
482+
self.machine_config.huge_pages,
483+
)
484+
}
485+
}
486+
440487
/// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
441488
///
442489
/// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
443490
/// prefers anonymous memory for performance reasons.
444491
pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
445-
let vhost_user_device_used = self
446-
.block
447-
.devices
448-
.iter()
449-
.any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
450-
451492
// Page faults are more expensive for shared memory mapping, including memfd.
452493
// For this reason, we only back guest memory with a memfd
453494
// if a vhost-user-blk device is configured in the VM, otherwise we fall back to
454495
// an anonymous private memory.
455496
//
497+
// Note that if a swiotlb region is used, no I/O will go through the "regular"
498+
// memory regions, and we can back them with anon memory regardless.
499+
//
456500
// The vhost-user-blk branch is not currently covered by integration tests in Rust,
457501
// because that would require running a backend process. If in the future we converge to
458502
// a single way of backing guest memory for vhost-user and non-vhost-user cases,
459503
// that would not be worth the effort.
460-
let regions = crate::arch::arch_memory_regions(
504+
self.allocate_memory(
461505
0,
462-
self.machine_config.mem_size_mib << MIB_TO_BYTES_SHIFT,
463-
);
464-
if vhost_user_device_used {
465-
memory::memfd_backed(
466-
regions.as_ref(),
467-
self.machine_config.track_dirty_pages,
468-
self.machine_config.huge_pages,
469-
)
470-
} else {
471-
memory::anonymous(
472-
regions.into_iter(),
473-
self.machine_config.track_dirty_pages,
474-
self.machine_config.huge_pages,
475-
)
506+
(self.machine_config.mem_size_mib - self.swiotlb_size_mib()) << MIB_TO_BYTES_SHIFT,
507+
self.vhost_user_devices_used() && !self.swiotlb_used(),
508+
)
509+
}
/// Allocates the dedicated I/O region for swiotlb use, if one was requested.
///
/// Returns `Ok(None)` when no swiotlb region was configured; otherwise returns
/// the single contiguous region backing the swiotlb.
pub fn allocate_io_memory(&self) -> Result<Option<GuestRegionMmap>, MemoryError> {
    if !self.swiotlb_used() {
        return Ok(None);
    }

    let swiotlb_size = self.swiotlb_size_mib() << MIB_TO_BYTES_SHIFT;
    // Place the swiotlb region at the very top of guest memory.
    // NOTE(review): assumes mem_size_mib >= swiotlb_size_mib, otherwise this
    // subtraction underflows — presumably enforced by config validation; confirm.
    let start = (self.machine_config.mem_size_mib << MIB_TO_BYTES_SHIFT) - swiotlb_size;
    // Never start before the final gap in the physical address space, so the
    // region cannot straddle the gap and stays describable as one range.
    let start = start.max(crate::arch::bytes_before_last_gap());

    let mut mem = self.allocate_memory(start, swiotlb_size, self.vhost_user_devices_used())?;

    // By construction [start, start + swiotlb_size) lies past the last gap, so
    // arch_memory_regions is expected to describe it as exactly one region.
    assert_eq!(mem.len(), 1);

    Ok(Some(mem.remove(0)))
}
478527
}
479528

0 commit comments

Comments
 (0)