
Commit d9599f5

JackThomson2 authored and pb8o committed

feat: Only copy bitmap if the snapshot fails

Only if the snapshot writing fails do we store the KVM buffer inside the Firecracker internal bitmap. Also attempted to make the fill algorithm in the test dynamic so that it works on ARM chips.

Signed-off-by: Jack Thomson <[email protected]>

1 parent 0d0cab9 commit d9599f5
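In outline, the change makes the dirty-bitmap hand-off conditional: on a successful write the internal bitmaps are reset as before, while on a failed write the KVM bitmap is folded into Firecracker's per-region bitmap so the dirty-page information is not lost. A minimal standalone sketch of that folding step, using simplified types rather than Firecracker's API (store_dirty and the bool bitmap are illustrative only):

use std::collections::HashMap;

// Slot -> bitmap words; bit j of word i marks page (i * 64 + j) as dirty.
type DirtyBitmap = HashMap<usize, Vec<u64>>;

// Fold one slot's KVM dirty words into a simple per-page bool bitmap,
// standing in for what the patch does with mark_dirty().
fn store_dirty(local: &mut [bool], kvm_words: &[u64]) {
    for (i, word) in kvm_words.iter().enumerate() {
        for j in 0..64usize {
            if ((word >> j) & 1) != 0 {
                local[i * 64 + j] = true;
            }
        }
    }
}

fn main() {
    let mut local = vec![false; 128];
    let mut kvm: DirtyBitmap = HashMap::new();
    kvm.insert(0, vec![0b101]); // pages 0 and 2 of slot 0 are dirty
    store_dirty(&mut local, &kvm[&0]);
    assert!(local[0] && !local[1] && local[2]);
    println!("page 0 dirty: {}, page 1 dirty: {}", local[0], local[1]);
}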

File tree

2 files changed (+103, -57 lines)


src/vmm/src/vstate/memory.rs

Lines changed: 97 additions & 54 deletions
@@ -107,6 +107,9 @@ where
 
     /// Resets all the memory region bitmaps
    fn reset_dirty(&self);
+
+    /// Store the dirty bitmap in the internal store
+    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize);
 }
 
 /// State of a guest memory region saved to file/buffer.
@@ -301,73 +304,57 @@ impl GuestMemoryExtension for GuestMemoryMmap {
         let mut writer_offset = 0;
         let page_size = get_page_size().map_err(MemoryError::PageSize)?;
 
-        self.iter().enumerate().for_each(|(slot, region)| {
+        let write_result = self.iter().enumerate().try_for_each(|(slot, region)| {
             let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
             let firecracker_bitmap = region.bitmap();
+            let mut write_size = 0;
+            let mut dirty_batch_start: u64 = 0;
 
             for (i, v) in kvm_bitmap.iter().enumerate() {
                 for j in 0..64 {
                     let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
-
-                    if is_kvm_page_dirty {
-                        let page_offset = ((i * 64) + j) * page_size;
-
-                        firecracker_bitmap.mark_dirty(page_offset, 1)
-                    }
-                }
-            }
-        });
-
-        self.iter()
-            .enumerate()
-            .try_for_each(|(slot, region)| {
-                let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
-                let firecracker_bitmap = region.bitmap();
-                let mut write_size = 0;
-                let mut dirty_batch_start: u64 = 0;
-
-                for i in 0..kvm_bitmap.len() {
-                    for j in 0..64 {
-                        let page_offset = ((i * 64) + j) * page_size;
-                        let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);
-                        if is_firecracker_page_dirty {
-                            // We are at the start of a new batch of dirty pages.
-                            if write_size == 0 {
-                                // Seek forward over the unmodified pages.
-                                writer
-                                    .seek(SeekFrom::Start(writer_offset + page_offset as u64))
-                                    .unwrap();
-                                dirty_batch_start = page_offset as u64;
-                            }
-                            write_size += page_size;
-                        } else if write_size > 0 {
-                            // We are at the end of a batch of dirty pages.
-                            writer.write_all_volatile(
-                                &region.get_slice(
-                                    MemoryRegionAddress(dirty_batch_start),
-                                    write_size,
-                                )?,
-                            )?;
-
-                            write_size = 0;
+                    let page_offset = ((i * 64) + j) * page_size;
+                    let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);
+
+                    if is_kvm_page_dirty || is_firecracker_page_dirty {
+                        // We are at the start of a new batch of dirty pages.
+                        if write_size == 0 {
+                            // Seek forward over the unmodified pages.
+                            writer
+                                .seek(SeekFrom::Start(writer_offset + page_offset as u64))
+                                .unwrap();
+                            dirty_batch_start = page_offset as u64;
                         }
+                        write_size += page_size;
+                    } else if write_size > 0 {
+                        // We are at the end of a batch of dirty pages.
+                        writer.write_all_volatile(
+                            &region
+                                .get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
+                        )?;
+
+                        write_size = 0;
                     }
                 }
+            }
 
-                if write_size > 0 {
-                    writer.write_all_volatile(
-                        &region.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
-                    )?;
-                }
-                writer_offset += region.len();
+            if write_size > 0 {
+                writer.write_all_volatile(
+                    &region.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
+                )?;
+            }
+            writer_offset += region.len();
 
-                Ok(())
-            })
-            .map_err(MemoryError::WriteMemory)?;
+            Ok(())
+        });
 
-        self.reset_dirty();
+        if write_result.is_err() {
+            self.store_dirty_bitmap(dirty_bitmap, page_size);
+        } else {
+            self.reset_dirty();
+        }
 
-        Ok(())
+        write_result.map_err(MemoryError::WriteMemory)
     }
 
     /// Resets all the memory region bitmaps
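The rewritten loop above merges the two earlier passes into one while keeping the existing batching behaviour: consecutive dirty pages are coalesced into a single write, and the writer seeks over clean runs. A standalone sketch of that batching logic under simplified types (dirty_batches is a hypothetical helper; the dirty slice stands in for the combined KVM-or-Firecracker dirty test):

// Coalesce runs of dirty pages into (offset, len) batches, one write each.
fn dirty_batches(dirty: &[bool], page_size: usize) -> Vec<(usize, usize)> {
    let mut batches = Vec::new();
    let (mut start, mut len) = (0usize, 0usize);
    for (page, &is_dirty) in dirty.iter().enumerate() {
        if is_dirty {
            if len == 0 {
                start = page * page_size; // start of a new batch; seek here
            }
            len += page_size;
        } else if len > 0 {
            batches.push((start, len)); // end of a batch; flush it
            len = 0;
        }
    }
    if len > 0 {
        batches.push((start, len)); // trailing batch at the end of the region
    }
    batches
}

fn main() {
    // Pages 0, 2 and 3 dirty with 4 KiB pages -> two batches.
    let batches = dirty_batches(&[true, false, true, true, false], 4096);
    assert_eq!(batches, vec![(0, 4096), (2 * 4096, 2 * 4096)]);
}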
@@ -378,6 +365,26 @@ impl GuestMemoryExtension for GuestMemoryMmap {
             }
         })
     }
+
+    /// Stores the dirty bitmap into the internal bitmap
+    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize) {
+        self.iter().enumerate().for_each(|(slot, region)| {
+            let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
+            let firecracker_bitmap = region.bitmap();
+
+            for (i, v) in kvm_bitmap.iter().enumerate() {
+                for j in 0..64 {
+                    let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
+
+                    if is_kvm_page_dirty {
+                        let page_offset = ((i * 64) + j) * page_size;
+
+                        firecracker_bitmap.mark_dirty(page_offset, 1)
+                    }
+                }
+            }
+        });
+    }
 }
 
 fn create_memfd(
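The indexing in store_dirty_bitmap assumes KVM's layout of 64 pages per u64 word, least significant bit first, so word i, bit j covers page i * 64 + j. A quick sanity check of that arithmetic (word_and_bit is a hypothetical helper, not part of the patch):

// Map a page number to the (word, bit) position KVM uses for it, and back.
fn word_and_bit(page: usize) -> (usize, usize) {
    (page / 64, page % 64)
}

fn main() {
    assert_eq!(word_and_bit(0), (0, 0)); // page 0 -> word 0, bit 0
    assert_eq!(word_and_bit(66), (1, 2)); // page 66 -> word 1, bit 2
    let (i, j) = word_and_bit(66);
    assert_eq!(i * 64 + j, 66); // the mapping round-trips
}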
@@ -829,6 +836,42 @@ mod tests {
         assert_eq!(expected_first_region, diff_file_content);
     }
 
+    #[test]
+    fn test_store_dirty_bitmap() {
+        let page_size = get_page_size().unwrap();
+
+        // Two regions of three pages each, with a one page gap between them.
+        let region_1_address = GuestAddress(0);
+        let region_2_address = GuestAddress(page_size as u64 * 4);
+        let region_size = page_size * 3;
+        let mem_regions = [
+            (region_1_address, region_size),
+            (region_2_address, region_size),
+        ];
+        let guest_memory =
+            GuestMemoryMmap::from_raw_regions(&mem_regions, true, HugePageConfig::None).unwrap();
+
+        // Check that the Firecracker bitmap is clean.
+        guest_memory.iter().for_each(|r| {
+            assert!(!r.bitmap().dirty_at(0));
+            assert!(!r.bitmap().dirty_at(page_size));
+            assert!(!r.bitmap().dirty_at(page_size * 2));
+        });
+
+        let mut dirty_bitmap: DirtyBitmap = HashMap::new();
+        dirty_bitmap.insert(0, vec![0b101]);
+        dirty_bitmap.insert(1, vec![0b101]);
+
+        guest_memory.store_dirty_bitmap(&dirty_bitmap, page_size);
+
+        // Assert that the bitmap now reports pages as dirty, matching the dirty bitmap.
+        guest_memory.iter().for_each(|r| {
+            assert!(r.bitmap().dirty_at(0));
+            assert!(!r.bitmap().dirty_at(page_size));
+            assert!(r.bitmap().dirty_at(page_size * 2));
+        });
+    }
+
     #[test]
     fn test_create_memfd() {
         let size = 1;
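For reference, the vec![0b101] inserted per slot above marks pages 0 and 2 dirty and leaves page 1 clean, which is exactly what the assertions verify. A standalone snippet showing the decoding (not part of the patch):

fn main() {
    let word: u64 = 0b101;
    let dirty: Vec<u64> = (0..64u64).filter(|j| ((word >> j) & 1) != 0).collect();
    assert_eq!(dirty, vec![0, 2]); // bit 0 -> page 0, bit 2 -> page 2
}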

tests/integration_tests/functional/test_snapshot_not_losing_dirty_pages.py

Lines changed: 6 additions & 3 deletions
@@ -5,6 +5,7 @@
 import subprocess
 from pathlib import Path
 
+import psutil
 import pytest
 
 
@@ -44,10 +45,12 @@ def test_diff_snapshot_works_after_error(
 
     chroot = Path(uvm.chroot())
 
-    # Create a large file, so we run out of space (ENOSPC) during the snapshot
-    # Assumes a Docker /srv tmpfs of 1G, derived by trial and error
+    # Create a large file dynamically based on available space
     fill = chroot / "fill"
-    subprocess.check_call(f"fallocate -l 330M {fill}", shell=True)
+    disk_usage = psutil.disk_usage(chroot)
+    target_size = round(disk_usage.free * 0.9)  # Attempt to fill 90% of free space
+
+    subprocess.check_call(f"fallocate -l {target_size} {fill}", shell=True)
 
     try:
         uvm.snapshot_diff()
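Deriving the fill size from psutil.disk_usage(chroot).free rather than a hardcoded 330M drops the assumption of a 1G Docker /srv tmpfs, so the ENOSPC failure should still be reachable on hosts with differently sized scratch space, including the ARM machines mentioned in the commit message; targeting 90% of free space presumably leaves a little slack for the test's other files while still making the snapshot write fail.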
