diff --git a/src/bitmap.rs b/src/bitmap.rs
index 7fcb841..f86d3ba 100644
--- a/src/bitmap.rs
+++ b/src/bitmap.rs
@@ -66,13 +66,27 @@ impl BaseAllocator for BitmapPageAllocator {
         self.total_pages = (end - start) / PAGE_SIZE;
 
         // Calculate the base offset stored in the real [`BitAlloc`] instance.
+        // The base must be aligned to MAX_ALIGN_1GB to support the maximum alignment.
         self.base = crate::align_down(start, MAX_ALIGN_1GB);
 
         // Range in bitmap: [start - self.base, start - self.base + total_pages * PAGE_SIZE)
-        let start = start - self.base;
-        let start_idx = start / PAGE_SIZE;
+        let start_idx = (start - self.base) / PAGE_SIZE;
+        let end_idx = start_idx + self.total_pages;
 
-        self.inner.insert(start_idx..start_idx + self.total_pages);
+        // Panic if the bitmap capacity is insufficient for the requested range.
+        // This can happen when:
+        // 1. the size is too large for the bitmap capacity, or
+        // 2. the start address is poorly aligned, creating a large gap below it.
+        assert!(
+            end_idx <= BitAllocUsed::CAP,
+            "bitmap capacity exceeded: need {} pages but CAP is {} (start={:#x}, size={:#x})",
+            end_idx,
+            BitAllocUsed::CAP,
+            start,
+            size
+        );
+
+        self.inner.insert(start_idx..end_idx);
     }
 
     fn add_memory(&mut self, _start: usize, _size: usize) -> AllocResult {
@@ -334,4 +348,69 @@ mod tests {
             i += 1;
         }
     }
+
+    #[test]
+    fn test_init_nonzero_start_address() {
+        // Test with a non-zero start address that fits within capacity.
+        // With BitAlloc1M in test mode (CAP = 1M pages = 4GB), a small offset
+        // and a 4MB allocation should work fine.
+        let mut allocator = BitmapPageAllocator::<PAGE_SIZE>::new();
+        let size = 4 * 1024 * 1024; // 4 MB size
+        let start_addr = 40960; // non-zero address (10 pages offset from 0)
+
+        allocator.init(start_addr, size);
+
+        // Verify the allocator is properly initialized.
+        assert_eq!(allocator.total_pages(), size / PAGE_SIZE);
+        assert_eq!(allocator.used_pages(), 0);
+        assert_eq!(allocator.available_pages(), size / PAGE_SIZE);
+
+        // Test basic allocation.
+        let addr = allocator.alloc_pages(1, PAGE_SIZE).unwrap();
+        assert_eq!(addr, start_addr);
+        assert_eq!(allocator.used_pages(), 1);
+
+        // Test deallocation.
+        allocator.dealloc_pages(addr, 1);
+        assert_eq!(allocator.used_pages(), 0);
+    }
+
+    #[test]
+    fn test_init_with_1gb_aligned_start() {
+        const SIZE_1G: usize = 1024 * 1024 * 1024;
+
+        // Test with a 1GB-aligned start address.
+        let mut allocator = BitmapPageAllocator::<PAGE_SIZE>::new();
+        let size = 4 * 1024 * 1024; // 4 MB
+        let start_addr = SIZE_1G; // 1GB-aligned
+
+        allocator.init(start_addr, size);
+
+        // Should still support allocations with various alignments.
+        let addr = allocator.alloc_pages(1, PAGE_SIZE).unwrap();
+        assert_eq!(addr, start_addr);
+        allocator.dealloc_pages(addr, 1);
+
+        // Test with a larger alignment.
+        let addr = allocator.alloc_pages(1, 1024 * 1024).unwrap(); // 1MB alignment
+        assert_eq!(addr % (1024 * 1024), 0);
+        allocator.dealloc_pages(addr, 1);
+    }
+
+    #[test]
+    #[should_panic(expected = "bitmap capacity exceeded")]
+    fn test_init_capacity_exceeded() {
+        // Test that init panics when the required range exceeds the bitmap capacity.
+        // In test mode, BitAlloc1M has CAP = 1M pages = 4GB.
+        // A one-page start address is aligned down to 0, leaving a one-page gap,
+        // so a 4GB size needs 1 + 4GB/4KB = 1M + 1 pages in the bitmap,
+        // which exceeds the 1M-page capacity.
+        let mut allocator = BitmapPageAllocator::<PAGE_SIZE>::new();
+        let size = 4 * 1024 * 1024 * 1024; // 4 GB - exactly at the capacity limit
+        let start_addr = PAGE_SIZE; // aligned down to 0, creating a one-page gap
+
+        // This should panic: start is aligned down to 0, leaving a gap of 1 page,
+        // and 4GB = 1M pages, so the total of 1M + 1 exceeds the CAP of 1M.
+        allocator.init(start_addr, size);
+    }
 }