
Commit 36cbc6b

dma: allocate the nearest size
Currently in the `DmaAllocator`, any request larger than PAGE_SIZE (4 KiB) is satisfied by allocating a full 2 MiB page. Relax this by finding the nearest buddy order for the requested size and allocating at that order instead, so a request only reserves as much memory as it actually needs.

Signed-off-by: Andy-Python-Programmer <[email protected]>
1 parent 8c9b70e commit 36cbc6b
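The "nearest buddy order" step can be sketched as follows. This is a standalone illustration, not Aero's actual `order_from_size` or `BUDDY_SIZE` table; the 4 KiB base block, the maximum order of 9 (2 MiB), and the rounding strategy are assumptions made for the example.

```rust
// Standalone sketch of the "nearest buddy order" computation (hypothetical;
// Aero's real `order_from_size` and `BUDDY_SIZE` table may differ).
const PAGE_SIZE: u64 = 4096; // order-0 block: 4 KiB
const MAX_ORDER: u32 = 9;    // 4 KiB << 9 == 2 MiB, the largest buddy block

fn order_from_size(size_bytes: u64) -> u32 {
    // Round the request up to whole pages, then up to the next power of two:
    // the exponent is the smallest order whose block covers the request.
    let pages = size_bytes.div_ceil(PAGE_SIZE).max(1);
    let order = pages.next_power_of_two().trailing_zeros();
    assert!(order <= MAX_ORDER, "request exceeds the largest buddy block");
    order
}

fn main() {
    assert_eq!(order_from_size(1), 0);               // 1 B     -> 4 KiB block
    assert_eq!(order_from_size(4 * 1024), 0);        // 4 KiB   -> 4 KiB block
    assert_eq!(order_from_size(4 * 1024 + 1), 1);    // 4 KiB+1 -> 8 KiB block
    assert_eq!(order_from_size(64 * 1024), 4);       // 64 KiB  -> order 4
    assert_eq!(order_from_size(2 * 1024 * 1024), 9); // 2 MiB   -> order 9
}
```

With this mapping, a request just over 4 KiB now costs an 8 KiB block instead of the 2 MiB page it used to.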

File tree

src/aero_kernel/src/mem/paging/addr.rs
src/aero_kernel/src/mem/paging/frame.rs
src/aero_kernel/src/utils/dma.rs

3 files changed: +31 -41 lines

src/aero_kernel/src/mem/paging/addr.rs

Lines changed: 7 additions & 1 deletion
```diff
@@ -111,14 +111,20 @@ impl VirtAddr {
         }
     }
 
+    pub fn as_bytes_mut(&self, size_bytes: usize) -> &mut [u8] {
+        assert!(self.validate_read::<&[u8]>());
+        unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), size_bytes) }
+    }
+
     /// Converts this HHDM (Higher Half Direct Map) virtual address to its physical address.
     pub fn as_hhdm_phys(&self) -> PhysAddr {
         unsafe { PhysAddr::new(self.clone() - crate::PHYSICAL_MEMORY_OFFSET) }
     }
 
     /// Returns if the address is valid to read `sizeof(T)` bytes at the address.
     fn validate_read<T: Sized>(&self) -> bool {
-        // FIXME: (*self + core::mem::size_of::<T>()) <= crate::arch::task::userland_last_address() // in-range
+        // FIXME: (*self + core::mem::size_of::<T>()) <= crate::arch::task::userland_last_address()
+        // // in-range
        self.0 != 0 // non-null
     }
 
```

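The new `as_bytes_mut` helper exposes `size_bytes` of memory behind a virtual address as a mutable byte slice; it is what lets the frame allocator's `alloc_zeroed` (next file) clear a freshly allocated block. Below is a userspace-only illustration of the same raw-pointer-to-slice pattern; `zero_region` and the `Vec` backing it are stand-ins invented for the demo, not kernel APIs.

```rust
// Userspace stand-in for the `as_bytes_mut` pattern: view `size_bytes` of
// memory behind a raw pointer as a mutable byte slice and zero it. In the
// kernel the pointer comes from an HHDM virtual address; here it points
// into a plain heap buffer.
fn zero_region(ptr: *mut u8, size_bytes: usize) {
    // SAFETY: the caller guarantees `ptr` is valid for `size_bytes` writes;
    // in this demo it points into a live, correctly sized Vec.
    let bytes = unsafe { core::slice::from_raw_parts_mut(ptr, size_bytes) };
    bytes.fill(0);
}

fn main() {
    let mut backing = vec![0xAAu8; 4096];
    zero_region(backing.as_mut_ptr(), backing.len());
    assert!(backing.iter().all(|&b| b == 0));
}
```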
src/aero_kernel/src/mem/paging/frame.rs

Lines changed: 21 additions & 23 deletions
```diff
@@ -78,19 +78,26 @@ impl LockedFrameAllocator {
         self.0
             .call_once(|| Mutex::new(GlobalFrameAllocator::new(memory_map)));
     }
+
+    pub fn alloc(&self, size_bytes: usize) -> Option<PhysAddr> {
+        let order = order_from_size(size_bytes as u64);
+
+        let mut allocator = self.0.get()?.lock_irq();
+        allocator.allocate_frame_inner(order)
+    }
+
+    pub fn alloc_zeroed(&self, size_bytes: usize) -> Option<PhysAddr> {
+        let addr = self.alloc(size_bytes)?;
+        addr.as_hhdm_virt().as_bytes_mut(size_bytes).fill(0);
+
+        Some(addr)
+    }
 }
 
 unsafe impl FrameAllocator<Size4KiB> for LockedFrameAllocator {
     fn allocate_frame(&self) -> Option<PhysFrame<Size4KiB>> {
-        self.0.get().map(|m| {
-            m.lock_irq()
-                .allocate_frame_inner(order_from_size(Size4KiB::SIZE))
-                .map(|f| {
-                    let frame = PhysFrame::containing_address(f);
-                    frame.as_slice_mut().fill(0);
-                    frame
-                })
-        })?
+        let phys = self.alloc(Size4KiB::SIZE as _)?;
+        Some(PhysFrame::containing_address(phys))
     }
 
     fn deallocate_frame(&self, frame: PhysFrame<Size4KiB>) {
@@ -106,11 +113,8 @@ unsafe impl FrameAllocator<Size4KiB> for LockedFrameAllocator {
 
 unsafe impl FrameAllocator<Size2MiB> for LockedFrameAllocator {
     fn allocate_frame(&self) -> Option<PhysFrame<Size2MiB>> {
-        self.0.get().map(|m| {
-            m.lock_irq()
-                .allocate_frame_inner(order_from_size(Size2MiB::SIZE))
-                .map(|f| PhysFrame::containing_address(f))
-        })?
+        let phys = self.alloc(Size2MiB::SIZE as _)?;
+        Some(PhysFrame::containing_address(phys))
     }
 
     fn deallocate_frame(&self, frame: PhysFrame<Size2MiB>) {
@@ -177,15 +181,9 @@ pub fn pmm_alloc(order: BuddyOrdering) -> PhysAddr {
     let order = order as usize;
     debug_assert!(order <= BUDDY_SIZE.len());
 
-    let addr = super::FRAME_ALLOCATOR
-        .0
-        .get()
-        .expect("pmm: frame allocator not initialized")
-        .lock()
-        .allocate_frame_inner(order)
-        .expect("pmm: out of memory");
-
-    addr
+    super::FRAME_ALLOCATOR
+        .alloc_zeroed(BUDDY_SIZE[order] as _)
+        .unwrap()
 }
 
 #[derive(Debug)]
```

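To make the shape of the new API concrete, here is a minimal mock of the `alloc` / `alloc_zeroed` split: `alloc_zeroed` is simply `alloc` followed by zeroing the block through a byte-slice view. The `MockFrameAllocator` and its `Vec` backing store are assumptions for illustration; they stand in for Aero's buddy allocator and HHDM mapping and are not kernel code.

```rust
// Minimal mock of the new `alloc` / `alloc_zeroed` split: `alloc_zeroed` is
// `alloc` plus zeroing the block through a byte-slice view, mirroring
// `addr.as_hhdm_virt().as_bytes_mut(..).fill(0)` above.
struct MockFrameAllocator {
    backing: Vec<u8>,
}

impl MockFrameAllocator {
    fn alloc(&mut self, size_bytes: usize) -> Option<*mut u8> {
        // The real code rounds `size_bytes` up to the nearest buddy order and
        // allocates a block of that order; the mock just hands out its buffer.
        if size_bytes > self.backing.len() {
            return None;
        }
        Some(self.backing.as_mut_ptr())
    }

    fn alloc_zeroed(&mut self, size_bytes: usize) -> Option<*mut u8> {
        let ptr = self.alloc(size_bytes)?;
        // SAFETY: `alloc` guaranteed `ptr` is valid for `size_bytes` bytes.
        unsafe { core::slice::from_raw_parts_mut(ptr, size_bytes) }.fill(0);
        Some(ptr)
    }
}

fn main() {
    let mut pmm = MockFrameAllocator { backing: vec![0xFF; 4096] };
    let ptr = pmm.alloc_zeroed(4096).unwrap();
    // The returned block is fully zeroed, like the result of `pmm_alloc`.
    let view = unsafe { core::slice::from_raw_parts(ptr, 4096) };
    assert!(view.iter().all(|&b| b == 0));
}
```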
src/aero_kernel/src/utils/dma.rs

Lines changed: 3 additions & 17 deletions
```diff
@@ -35,23 +35,14 @@ pub struct DmaAllocator;
 unsafe impl Allocator for DmaAllocator {
     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
         // XXX: The DMA buffer must be aligned to a page boundary.
-        let size_bytes = layout.size() as u64;
-
-        let phys = if size_bytes <= Size4KiB::SIZE {
-            let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR.allocate_frame().ok_or(AllocError)?;
-            frame.start_address()
-        } else {
-            assert!(size_bytes <= Size2MiB::SIZE);
-
-            let frame: PhysFrame<Size2MiB> = FRAME_ALLOCATOR.allocate_frame().ok_or(AllocError)?;
-            frame.start_address()
-        };
+        let size_bytes = layout.size();
 
+        let phys = FRAME_ALLOCATOR.alloc(size_bytes).ok_or(AllocError)?;
         let virt = phys.as_hhdm_virt();
 
         // SAFETY: The frame is aligned and non-null.
         let ptr = unsafe { NonNull::new_unchecked(virt.as_mut_ptr() as *mut u8) };
-        Ok(NonNull::slice_from_raw_parts(ptr, size_bytes as _))
+        Ok(NonNull::slice_from_raw_parts(ptr, size_bytes))
     }
 
     unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
@@ -63,11 +54,6 @@ pub type DmaBuffer<T> = Box<T, DmaAllocator>;
 pub struct Dma<T: ?Sized>(DmaBuffer<T>);
 
 impl<T> Dma<T> {
-    /// Creates a new DMA buffer intialized with `value`.
-    pub fn new(value: T) -> Self {
-        Dma(DmaBuffer::new_in(value, DmaAllocator))
-    }
-
     /// Creates a new DMA (Direct Memory Access) buffer and is initialized
     /// with zeros.
     ///
```

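Since `DmaBuffer<T>` is just `Box<T, DmaAllocator>`, the allocator change is transparent to callers. The nightly-only sketch below (it needs `#![feature(allocator_api)]`, like the kernel) shows how a custom `Allocator` plugs into `Box::new_in`; `DemoDmaAllocator` is a hypothetical allocator that simply forwards to the global heap, not the kernel's `DmaAllocator`, which hands out page-aligned, physically contiguous memory from the frame allocator.

```rust
// Nightly-only sketch of how a custom `Allocator` plugs into `Box`, which is
// all that `DmaBuffer<T> = Box<T, DmaAllocator>` relies on. This demo
// allocator forwards to the global heap; it is illustrative only.
#![feature(allocator_api)]

use std::alloc::{AllocError, Allocator, Global, Layout};
use std::ptr::NonNull;

struct DemoDmaAllocator;

unsafe impl Allocator for DemoDmaAllocator {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Stand-in for `FRAME_ALLOCATOR.alloc(layout.size())` + HHDM lookup.
        Global.allocate(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: `ptr` was allocated by `Global` with this exact `layout`.
        unsafe { Global.deallocate(ptr, layout) }
    }
}

type DemoDmaBuffer<T> = Box<T, DemoDmaAllocator>;

fn main() {
    // Analogous in spirit to building a zero-initialized `Dma<T>` buffer.
    let buf: DemoDmaBuffer<[u8; 512]> = Box::new_in([0u8; 512], DemoDmaAllocator);
    assert!(buf.iter().all(|&b| b == 0));
}
```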