|
| 1 | +// Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
| 2 | +// SPDX-License-Identifier: Apache-2.0 |
| 3 | + |
| 4 | +use std::num::NonZeroUsize; |
| 5 | +use std::sync::atomic::{AtomicU64, Ordering}; |
| 6 | + |
/// `UserfaultBitmap` implements a simple bit map on the page level with test and set operations.
/// It is page-size aware, so it converts addresses to page numbers before setting or clearing
/// the bits.
#[derive(Debug)]
pub struct UserfaultBitmap {
    // Caller-owned array of bitmap words; validity/lifetime is guaranteed by the `new` contract.
    map: *mut AtomicU64,
    // Number of pages tracked by the bitmap (one bit per page).
    size: usize,
    // Size in bytes of the memory region this bitmap covers.
    byte_size: usize,
    // Page granularity used to convert byte addresses into bit indices.
    page_size: NonZeroUsize,
    // Number of `AtomicU64` words pointed to by `map`.
    map_size: usize,
}
| 18 | + |
| 19 | +impl UserfaultBitmap { |
| 20 | + /// Create a new bitmap using a user-supplied pointer. |
| 21 | + /// |
| 22 | + /// # Safety |
| 23 | + /// |
| 24 | + /// Caller must ensure: |
| 25 | + /// * `map_ptr` points to a valid region of memory containing initialized `AtomicU64` elements |
| 26 | + /// * `map_ptr` is properly aligned for `AtomicU64` |
| 27 | + /// * The memory region contains enough space for `ceil(ceil(byte_size/page_size)/64)` elements |
| 28 | + /// * The memory region pointed to by `map_ptr` must not be accessed through any other means |
| 29 | + /// while this `UserfaultBitmap` exists |
| 30 | + /// * The caller must ensure the memory remains valid for the lifetime of the returned |
| 31 | + /// `UserfaultBitmap` |
| 32 | + pub unsafe fn new(map_ptr: *mut AtomicU64, byte_size: usize, page_size: NonZeroUsize) -> Self { |
| 33 | + let num_pages = byte_size.div_ceil(page_size.get()); |
| 34 | + let map_size = num_pages.div_ceil(u64::BITS as usize); |
| 35 | + |
| 36 | + UserfaultBitmap { |
| 37 | + map: map_ptr, |
| 38 | + size: num_pages, |
| 39 | + byte_size, |
| 40 | + page_size, |
| 41 | + map_size, |
| 42 | + } |
| 43 | + } |
| 44 | + |
| 45 | + /// Is bit `n` set? Bits outside the range of the bitmap are always unset. |
| 46 | + pub fn is_bit_set(&self, index: usize) -> bool { |
| 47 | + if index < self.size { |
| 48 | + unsafe { |
| 49 | + let map_entry = &*self.map.add(index >> 6); |
| 50 | + (map_entry.load(Ordering::Acquire) & (1 << (index & 63))) != 0 |
| 51 | + } |
| 52 | + } else { |
| 53 | + // Out-of-range bits are always unset. |
| 54 | + false |
| 55 | + } |
| 56 | + } |
| 57 | + |
| 58 | + /// Reset a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap |
| 59 | + /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds |
| 60 | + /// to address `start_addr + len - 1`. |
| 61 | + pub fn reset_addr_range(&self, start_addr: usize, len: usize) { |
| 62 | + if len == 0 { |
| 63 | + return; |
| 64 | + } |
| 65 | + |
| 66 | + let first_bit = start_addr / self.page_size; |
| 67 | + let last_bit = start_addr.saturating_add(len - 1) / self.page_size; |
| 68 | + |
| 69 | + for n in first_bit..=last_bit { |
| 70 | + if n >= self.size { |
| 71 | + break; |
| 72 | + } |
| 73 | + unsafe { |
| 74 | + let map_entry = &*self.map.add(n >> 6); |
| 75 | + map_entry.fetch_and(!(1 << (n & 63)), Ordering::SeqCst); |
| 76 | + } |
| 77 | + } |
| 78 | + } |
| 79 | +} |
| 80 | + |
#[cfg(test)]
mod tests {
    use std::sync::atomic::AtomicU64;

    use super::*;

    /// Build a zero-initialized backing store plus a bitmap over it. The backing
    /// `Vec` is handed back so its heap buffer outlives the bitmap's raw pointer.
    fn setup_test_bitmap(
        byte_size: usize,
        page_size: NonZeroUsize,
    ) -> (Vec<AtomicU64>, UserfaultBitmap) {
        let pages = byte_size.div_ceil(page_size.get());
        let words = pages.div_ceil(u64::BITS as usize);
        let mut backing: Vec<AtomicU64> = (0..words).map(|_| AtomicU64::new(0)).collect();
        // Moving `backing` out of this function relocates only the Vec header;
        // the heap buffer (and thus this pointer) stays put.
        let bitmap = unsafe { UserfaultBitmap::new(backing.as_mut_ptr(), byte_size, page_size) };
        (backing, bitmap)
    }

    #[test]
    fn test_basic_initialization() {
        let page_size = NonZeroUsize::new(128).unwrap();
        let (_backing, bitmap) = setup_test_bitmap(1024, page_size);

        // A fresh bitmap starts with every bit clear.
        assert!(!bitmap.is_bit_set(0));
        assert!(!bitmap.is_bit_set(7));
    }

    #[test]
    fn test_out_of_bounds_access() {
        let page_size = NonZeroUsize::new(128).unwrap();
        let (_backing, bitmap) = setup_test_bitmap(1024, page_size);

        // 1024 bytes at 128 bytes/page gives 8 pages, so indices 8 and up are
        // out of range and must read as unset.
        assert!(!bitmap.is_bit_set(8));
        assert!(!bitmap.is_bit_set(100));
    }

    #[test]
    fn test_reset_addr_range() {
        let page_size = NonZeroUsize::new(128).unwrap();
        let (backing, bitmap) = setup_test_bitmap(1024, page_size);

        // Mark the first two pages directly in the backing word.
        backing[0].store(0b11, Ordering::SeqCst);

        assert!(bitmap.is_bit_set(0));
        assert!(bitmap.is_bit_set(1));
        assert!(!bitmap.is_bit_set(2));

        // Clearing exactly one page must leave its neighbour intact.
        bitmap.reset_addr_range(0, 128);

        assert!(!bitmap.is_bit_set(0));
        assert!(bitmap.is_bit_set(1));
    }

    #[test]
    fn test_reset_addr_range_spanning_multiple_words() {
        let page_size = NonZeroUsize::new(128).unwrap();
        // 128 pages, so the bitmap occupies two 64-bit words.
        let (backing, bitmap) = setup_test_bitmap(128 * 128, page_size);

        // Saturate both words.
        backing[0].store(u64::MAX, Ordering::SeqCst);
        backing[1].store(u64::MAX, Ordering::SeqCst);

        // A 256-byte range starting at page 63 covers pages 63 and 64 — the last
        // bit of word 0 and the first bit of word 1.
        bitmap.reset_addr_range(63 * 128, 256);

        assert!(!bitmap.is_bit_set(63));
        assert!(!bitmap.is_bit_set(64));
        // Bits on either side of the range survive.
        assert!(bitmap.is_bit_set(62));
        assert!(bitmap.is_bit_set(65));
    }

    #[test]
    fn test_reset_addr_range_zero_length() {
        let page_size = NonZeroUsize::new(128).unwrap();
        let (backing, bitmap) = setup_test_bitmap(1024, page_size);

        backing[0].store(1, Ordering::SeqCst);

        // A zero-length reset is a no-op.
        bitmap.reset_addr_range(0, 0);

        assert!(bitmap.is_bit_set(0));
    }

    #[test]
    fn test_reset_addr_range_beyond_bounds() {
        let page_size = NonZeroUsize::new(128).unwrap();
        let (_backing, bitmap) = setup_test_bitmap(1024, page_size);

        // A range entirely past the end of the bitmap is ignored, not a panic.
        bitmap.reset_addr_range(1024, 2048);
    }

    #[test]
    fn test_edge_cases() {
        // Smallest possible page size.
        let page_size = NonZeroUsize::new(1).unwrap();
        let (_backing, bitmap) = setup_test_bitmap(64, page_size);
        assert!(!bitmap.is_bit_set(0));

        // An empty region yields an empty bitmap.
        let page_size = NonZeroUsize::new(128).unwrap();
        let (_backing, bitmap) = setup_test_bitmap(0, page_size);
        assert!(!bitmap.is_bit_set(0));

        // Address arithmetic near usize::MAX must not overflow.
        bitmap.reset_addr_range(usize::MAX - 128, 256);
    }
}
0 commit comments