
Commit fa3afe1

Snapshot restore for swiotlb regions
Support restoring VMs with swiotlb regions. For this, untangle the uffd handshake from the actual restoration of memory regions: we first need to restore and register _all_ memory regions with the Vm before we can send a single handshake, covering both normal and swiotlb regions, to the Uffd handler. While we're here, significantly simplify the jungle of error types.

Signed-off-by: Patrick Roy <[email protected]>
1 parent 8b712b8 commit fa3afe1
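The diffs below implement this ordering by registering every region with the Vm before any UFFD handshake happens. As a minimal, self-contained sketch of just that ordering (all types and names here are hypothetical stand-ins, not Firecracker's actual API):

```rust
// Hypothetical sketch: all regions -- normal and swiotlb -- are registered
// first, and only then is a single handshake payload describing every
// region assembled.
#[derive(Debug)]
struct Region {
    start: u64,
    len: u64,
    is_swiotlb: bool,
}

#[derive(Default, Debug)]
struct Vm {
    regions: Vec<Region>,
}

impl Vm {
    fn register(&mut self, region: Region) {
        self.regions.push(region);
    }

    /// One handshake message covering every registered region.
    fn handshake_payload(&self) -> Vec<(u64, u64, bool)> {
        self.regions
            .iter()
            .map(|r| (r.start, r.len, r.is_swiotlb))
            .collect()
    }
}

fn main() {
    let mut vm = Vm::default();

    // 1. Restore and register the normal guest memory region(s).
    vm.register(Region { start: 0, len: 128 << 20, is_swiotlb: false });
    // 2. Restore and register the swiotlb region(s).
    vm.register(Region { start: 128 << 20, len: 16 << 20, is_swiotlb: true });

    // 3. Only now send a single handshake describing both kinds of regions.
    let payload = vm.handshake_payload();
    assert_eq!(payload.len(), 2);
    println!("uffd handshake would describe {} regions", payload.len());
}
```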

10 files changed, +183 -201 lines

src/vmm/src/arch/aarch64/mod.rs

Lines changed: 4 additions & 4 deletions
```diff
@@ -30,9 +30,7 @@ use crate::cpu_config::templates::CustomCpuTemplate;
 use crate::initrd::InitrdConfig;
 use crate::utils::{align_up, usize_to_u64};
 use crate::vmm_config::machine_config::MachineConfig;
-use crate::vstate::memory::{
-    Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap,
-};
+use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
 use crate::vstate::vcpu::KvmVcpuError;
 use crate::{Vcpu, VcpuConfig, Vmm, logger};
 
@@ -142,7 +140,9 @@ pub fn configure_system_for_boot(
 
     let swiotlb_region = match vmm.vm.swiotlb_regions().num_regions() {
         0 | 1 => vmm.vm.swiotlb_regions().iter().next(),
-        _ => panic!("Firecracker tried to configure more than one swiotlb region. This is a logic bug.")
+        _ => panic!(
+            "Firecracker tried to configure more than one swiotlb region. This is a logic bug."
+        ),
     };
 
     let fdt = fdt::create_fdt(
```

src/vmm/src/builder.rs

Lines changed: 65 additions & 8 deletions
```diff
@@ -4,16 +4,16 @@
 //! Enables pre-boot setup, instantiation and booting of a Firecracker VMM.
 
 use std::fmt::Debug;
-use std::io;
+use std::io::{self};
 #[cfg(feature = "gdb")]
 use std::sync::mpsc;
 use std::sync::{Arc, Mutex};
 
 use event_manager::{MutEventSubscriber, SubscriberOps};
 use libc::EFD_NONBLOCK;
 use linux_loader::cmdline::Cmdline as LoaderKernelCmdline;
-use userfaultfd::Uffd;
 use utils::time::TimestampUs;
+use vm_memory::GuestMemoryRegion;
 #[cfg(target_arch = "aarch64")]
 use vm_superio::Rtc;
 use vm_superio::Serial;
@@ -50,14 +50,17 @@ use crate::devices::virtio::vsock::{Vsock, VsockUnixBackend};
 use crate::gdb;
 use crate::initrd::{InitrdConfig, InitrdError};
 use crate::logger::{debug, error};
-use crate::persist::{MicrovmState, MicrovmStateError};
+use crate::persist::{
+    MicrovmState, MicrovmStateError, RestoreMemoryError, SnapshotStateFromFileError,
+    restore_memory, send_uffd_handshake,
+};
 use crate::resources::VmResources;
 use crate::seccomp::BpfThreadMap;
 use crate::snapshot::Persist;
 use crate::vmm_config::instance_info::InstanceInfo;
 use crate::vmm_config::machine_config::MachineConfigError;
+use crate::vmm_config::snapshot::{MemBackendConfig, MemBackendType};
 use crate::vstate::kvm::Kvm;
-use crate::vstate::memory::GuestRegionMmap;
 use crate::vstate::vcpu::{Vcpu, VcpuError};
 use crate::vstate::vm::Vm;
 use crate::{EventManager, Vmm, VmmError, device_manager};
@@ -391,6 +394,8 @@ pub enum BuildMicrovmFromSnapshotError {
     SetTsc(#[from] crate::arch::SetTscError),
     /// Failed to restore microVM state: {0}
     RestoreState(#[from] crate::vstate::vm::ArchVmError),
+    /// Failed to get snapshot state from file: {0}
+    LoadState(#[from] SnapshotStateFromFileError),
     /// Failed to update microVM configuration: {0}
     VmUpdateConfig(#[from] MachineConfigError),
     /// Failed to restore MMIO device: {0}
@@ -411,19 +416,19 @@ pub enum BuildMicrovmFromSnapshotError {
     ACPIDeviManager(#[from] ACPIDeviceManagerRestoreError),
     /// VMGenID update failed: {0}
     VMGenIDUpdate(std::io::Error),
+    /// Failed to restore guest memory: {0}
+    Memory(#[from] RestoreMemoryError),
 }
 
 /// Builds and starts a microVM based on the provided MicrovmState.
 ///
 /// An `Arc` reference of the built `Vmm` is also plugged in the `EventManager`, while another
 /// is returned.
-#[allow(clippy::too_many_arguments)]
 pub fn build_microvm_from_snapshot(
     instance_info: &InstanceInfo,
     event_manager: &mut EventManager,
     microvm_state: MicrovmState,
-    guest_memory: Vec<GuestRegionMmap>,
-    uffd: Option<Uffd>,
+    mem_backend: &MemBackendConfig,
     seccomp_filters: &BpfThreadMap,
     vm_resources: &mut VmResources,
 ) -> Result<Arc<Mutex<Vmm>>, BuildMicrovmFromSnapshotError> {
@@ -437,11 +442,63 @@ pub fn build_microvm_from_snapshot(
     )
     .map_err(StartMicrovmError::Internal)?;
 
+    let track_dirty_pages = vm_resources.machine_config.track_dirty_pages;
+    let huge_pages = vm_resources.machine_config.huge_pages;
+
+    let mem_backend_path = &mem_backend.backend_path;
+
+    let mem_file = match mem_backend.backend_type {
+        MemBackendType::File => Some(mem_backend_path),
+        MemBackendType::Uffd => None,
+    };
+
+    let guest_memory = restore_memory(
+        &microvm_state.vm_state.memory,
+        mem_file,
+        huge_pages,
+        track_dirty_pages,
+        0,
+    )?;
+    let io_memory = restore_memory(
+        &microvm_state.vm_state.io_memory,
+        mem_file,
+        huge_pages,
+        track_dirty_pages,
+        guest_memory.iter().map(|r| r.len()).sum(),
+    )?;
+
     vmm.vm
         .register_memory_regions(guest_memory)
         .map_err(VmmError::Vm)
         .map_err(StartMicrovmError::Internal)?;
-    vmm.uffd = uffd;
+
+    for region in io_memory {
+        vmm.vm
+            .register_swiotlb_region(region)
+            .map_err(VmmError::Vm)
+            .map_err(StartMicrovmError::Internal)?;
+    }
+
+    vmm.uffd = match mem_backend.backend_type {
+        MemBackendType::File => None,
+        MemBackendType::Uffd => {
+            let (uffd, mut mappings) = vmm
+                .vm
+                .create_uffd()
+                .map_err(RestoreMemoryError::UffdCreate)?;
+
+            #[allow(deprecated)]
+            mappings.iter_mut().for_each(|mapping| {
+                mapping.page_size = vm_resources.machine_config.huge_pages.page_size();
+                mapping.page_size_kib = vm_resources.machine_config.huge_pages.page_size();
+            });
+
+            send_uffd_handshake(mem_backend_path, &mappings, &uffd)
+                .map_err(RestoreMemoryError::UffdHandshake)?;
+
+            Some(uffd)
+        }
+    };
 
     #[cfg(target_arch = "x86_64")]
     {
```
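The second `restore_memory` call above passes `guest_memory.iter().map(|r| r.len()).sum()` as its final argument, which suggests the swiotlb data lives in the same backing file directly after the normal regions, so its offset equals the combined length of everything before it. A minimal sketch of just that offset arithmetic (`RegionDesc` and `swiotlb_file_offset` are hypothetical names, not part of the patch):

```rust
// Hypothetical region descriptor; this mirrors only the offset computation
// used when restoring swiotlb regions after the normal guest memory regions.
struct RegionDesc {
    len: u64,
}

/// Assumes swiotlb data starts right after the normal regions' combined length.
fn swiotlb_file_offset(normal_regions: &[RegionDesc]) -> u64 {
    normal_regions.iter().map(|r| r.len).sum()
}

fn main() {
    let normal = [RegionDesc { len: 128 << 20 }, RegionDesc { len: 64 << 20 }];
    assert_eq!(swiotlb_file_offset(&normal), 192 << 20);
    println!("swiotlb data starts at file offset {}", swiotlb_file_offset(&normal));
}
```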

src/vmm/src/devices/virtio/vhost_user.rs

Lines changed: 1 addition & 0 deletions
```diff
@@ -478,6 +478,7 @@ pub(crate) mod tests {
             libc::MAP_PRIVATE,
             Some(file),
             false,
+            0,
         )
         .unwrap()
         .into_iter()
```
