|
17 | 17 | // build the memory manager.
18 | 18 |
19 | 19 | use bootloader::BootInfo;
| 20 | +use core::sync::atomic::{AtomicU64, Ordering}; |
20 | 21 | use x86_64::registers::control::Cr3;
21 | | -use x86_64::structures::paging::{OffsetPageTable, PageTable}; |
| 22 | +use x86_64::structures::paging::mapper::MapToError; |
| 23 | +use x86_64::structures::paging::{ |
| 24 | + FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PageTableFlags, Size4KiB, |
| 25 | +}; |
22 | 26 | use x86_64::VirtAddr;
23 | 27 |
24 | 28 | mod constants;

@@ -82,6 +86,93 @@ pub unsafe fn kernel_pml4() -> OffsetPageTable<'static> {

82 | 86 | OffsetPageTable::new(page_table, PHYSICAL_MEMORY_OFFSET)
83 | 87 | }
84 | 88 |
| 89 | +/// StackBounds describes the virtual address range used |
| 90 | +/// for a stack. |
| 91 | +/// |
| 92 | +#[derive(Debug, Clone, Copy, PartialEq, Eq)] |
| 93 | +pub struct StackBounds { |
| 94 | + start: VirtAddr, |
| 95 | + end: VirtAddr, |
| 96 | +} |
| 97 | + |
| 98 | +impl StackBounds { |
| 99 | + /// from returns the stack bounds covering the |
| 100 | + /// given virtual address range. |
| 101 | + /// |
| 102 | + pub fn from(range: &VirtAddrRange) -> Self { |
| 103 | + StackBounds { |
| 104 | + start: range.start(), |
| 105 | + end: range.end() + 1u64, // StackBounds is exclusive, range is inclusive. |
| 106 | + } |
| 107 | + } |
| 108 | + |
| 109 | + /// start returns the smallest valid address in the |
| 110 | + /// stack bounds. As the stack grows downwards, this |
| 111 | + /// is the limit to which the stack can grow. |
| 112 | + /// |
| 113 | + pub fn start(&self) -> VirtAddr { |
| 114 | + self.start |
| 115 | + } |
| 116 | + |
| 117 | + /// end returns the first address beyond the stack |
| 118 | + /// bounds. As the stack grows downwards, this is |
| 119 | + /// where the stack pointer begins: the base of the stack. |
| 120 | + /// |
| 121 | + pub fn end(&self) -> VirtAddr { |
| 122 | + self.end |
| 123 | + } |
| 124 | +} |
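
A quick sketch of how these bounds might be consumed, assuming a VirtAddrRange type whose start() and end() accessors are inclusive, as the comment in from suggests (the VirtAddrRange::new constructor shown here is illustrative, not part of this change):

    // Hypothetical inclusive range covering four 4 KiB pages.
    let range = VirtAddrRange::new(VirtAddr::new(0x4444_0000), VirtAddr::new(0x4444_3fff));
    let bounds = StackBounds::from(&range);
    assert_eq!(bounds.start(), VirtAddr::new(0x4444_0000));
    assert_eq!(bounds.end(), VirtAddr::new(0x4444_4000)); // exclusive: one past range.end()
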
| 125 | + |
| 126 | +/// reserve_kernel_stack reserves num_pages pages of |
| 127 | +/// stack memory for a kernel thread. |
| 128 | +/// |
| 129 | +/// reserve_kernel_stack returns the page at the start |
| 130 | +/// of the stack (the lowest address). |
| 131 | +/// |
| 132 | +fn reserve_kernel_stack(num_pages: u64) -> Page { |
| 133 | + static STACK_ALLOC_NEXT: AtomicU64 = AtomicU64::new(constants::KERNEL_STACK_1_START.as_u64()); |
| 134 | + let start_addr = VirtAddr::new( |
| 135 | + STACK_ALLOC_NEXT.fetch_add(num_pages * Page::<Size4KiB>::SIZE, Ordering::Relaxed), |
| 136 | + ); |
| 137 | + |
| 138 | + let last_addr = start_addr + (num_pages * Page::<Size4KiB>::SIZE) - 1u64; |
| 139 | + if !KERNEL_STACK.contains_range(start_addr, last_addr) { |
| 140 | + panic!("cannot reserve kernel stack: kernel stack space exhausted"); |
| 141 | + } |
| 142 | + |
| 143 | + Page::from_start_address(start_addr).expect("`STACK_ALLOC_NEXT` not page aligned") |
| 144 | +} |
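
reserve_kernel_stack is effectively a lock-free bump allocator over a fixed virtual region: fetch_add hands each caller a disjoint address range, and reservations are never returned. A minimal standalone sketch of the same pattern, with made-up region bounds for illustration:

    use core::sync::atomic::{AtomicU64, Ordering};

    const PAGE_SIZE: u64 = 4096;
    const REGION_START: u64 = 0xffff_8000_4000_0000; // hypothetical region base
    const REGION_END: u64 = 0xffff_8000_4100_0000; // hypothetical exclusive limit

    static NEXT: AtomicU64 = AtomicU64::new(REGION_START);

    /// Reserve num_pages pages. fetch_add gives concurrent callers
    /// disjoint ranges without needing a lock.
    fn reserve(num_pages: u64) -> Option<u64> {
        let start = NEXT.fetch_add(num_pages * PAGE_SIZE, Ordering::Relaxed);
        if start + num_pages * PAGE_SIZE > REGION_END {
            return None; // region exhausted; like the kernel version, no rollback
        }
        Some(start)
    }

One consequence of this design is that stack address space is never freed or reused, which matches the panic-on-exhaustion behaviour above.
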
| 145 | + |
| 146 | +/// new_kernel_stack allocates num_pages pages of stack |
| 147 | +/// memory for a kernel thread, plus an unmapped guard |
| 148 | +/// page below it, returning the bounds of the allocated stack. |
| 149 | +/// |
| 150 | +pub fn new_kernel_stack(num_pages: u64) -> Result<StackBounds, MapToError<Size4KiB>> { |
| 151 | + let guard_page = reserve_kernel_stack(num_pages + 1); |
| 152 | + let stack_start = guard_page + 1; |
| 153 | + let stack_end = stack_start + num_pages; |
| 154 | + |
| 155 | + let mut mapper = unsafe { kernel_pml4() }; |
| 156 | + let mut frame_allocator = pmm::ALLOCATOR.lock(); |
| 157 | + for page in Page::range(stack_start, stack_end) { |
| 158 | + let frame = frame_allocator |
| 159 | + .allocate_frame() |
| 160 | + .ok_or(MapToError::FrameAllocationFailed)?; |
| 161 | + |
| 162 | + let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE; |
| 163 | + unsafe { |
| 164 | + mapper |
| 165 | + .map_to(page, frame, flags, &mut *frame_allocator)? |
| 166 | + .flush() |
| 167 | + }; |
| 168 | + } |
| 169 | + |
| 170 | + Ok(StackBounds { |
| 171 | + start: stack_start.start_address(), |
| 172 | + end: stack_end.start_address(), |
| 173 | + }) |
| 174 | +} |
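
Typical usage when spawning a kernel thread might look like the sketch below; since the stack grows downwards, the exclusive end of the bounds is the natural initial stack pointer (the surrounding thread-creation code is assumed, not shown):

    // Allocate a 32-page (128 KiB) stack plus its guard page.
    let bounds = new_kernel_stack(32).expect("kernel stack allocation failed");
    let initial_rsp = bounds.end(); // stack pointer starts here and grows downwards
    let stack_floor = bounds.start(); // growth limit; the guard page sits just below

Because the guard page is reserved immediately below stack_start but never mapped, overflowing the stack triggers a page fault rather than silently corrupting the adjacent stack.
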
| 175 | + |
85 | 176 | #[test_case]
86 | 177 | fn simple_allocation() {
87 | 178 | use alloc::boxed::Box;
|