Commit ffa7b35

vmalloc: initial implementation
Signed-off-by: Andy-Python-Programmer <[email protected]>
1 parent d2e6fd6 commit ffa7b35

4 files changed: +189 -96 lines changed

src/aero_kernel/src/mem/alloc.rs
Lines changed: 12 additions & 96 deletions

@@ -19,21 +19,13 @@

 use core::alloc;
 use core::alloc::{GlobalAlloc, Layout};
-use core::ptr::NonNull;
-
-use linked_list_allocator::Heap;

 use crate::utils::sync::Mutex;
-use crate::AERO_SYSTEM_ALLOCATOR;

 use super::paging::FRAME_ALLOCATOR;
-use super::AddressSpace;
+use super::vmalloc;
 use crate::mem::paging::*;

-const HEAP_MAX_SIZE: usize = 128 * 1024 * 1024; // 128 GiB
-const HEAP_START: usize = 0xfffff80000000000;
-const HEAP_END: usize = HEAP_START + HEAP_MAX_SIZE;
-
 #[repr(C)]
 struct SlabHeader {
     ptr: *mut Slab,

@@ -125,7 +117,6 @@ impl Slab {

 struct ProtectedAllocator {
     slabs: [Slab; 10],
-    linked_list_heap: Heap,
 }

 struct Allocator {

@@ -148,8 +139,6 @@ impl Allocator {
                 Slab::new(512),
                 Slab::new(1024),
             ],
-
-            linked_list_heap: Heap::empty(),
         }),
     }
 }

@@ -165,71 +154,21 @@
         if let Some(slab) = slab {
             slab.alloc()
         } else {
-            inner
-                .linked_list_heap
-                .allocate_first_fit(layout)
-                .or_else(|_| {
-                    let heap_top = inner.linked_list_heap.top();
-                    let size = align_up(layout.size() as u64, 0x1000);
-
-                    // Check if our heap has not increased beyond the maximum allowed size.
-                    if heap_top + size as usize > HEAP_END {
-                        panic!("the heap size has increased more then {:#x}", HEAP_END)
-                    }
-
-                    // Else we just have to extend the heap.
-                    let mut address_space = AddressSpace::this();
-                    let mut offset_table = address_space.offset_page_table();
-
-                    let page_range = {
-                        let heap_start = VirtAddr::new(heap_top as _);
-                        let heap_end = heap_start + size - 1u64;
-
-                        let heap_start_page: Page = Page::containing_address(heap_start);
-                        let heap_end_page = Page::containing_address(heap_end);
-
-                        Page::range_inclusive(heap_start_page, heap_end_page)
-                    };
-
-                    for page in page_range {
-                        let frame = unsafe {
-                            FRAME_ALLOCATOR
-                                .allocate_frame()
-                                .expect("Failed to allocate frame to extend heap")
-                        };
-
-                        unsafe {
-                            offset_table.map_to(
-                                page,
-                                frame,
-                                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-                            )
-                        }
-                        .expect("Failed to map frame to extend the heap")
-                        .flush();
-                    }
-
-                    unsafe {
-                        inner.linked_list_heap.extend(size as usize); // Now extend the heap.
-                        inner.linked_list_heap.allocate_first_fit(layout) // And try again.
-                    }
-                })
-                .expect("alloc: memory exhausted")
-                .as_ptr()
+            let size = align_up(layout.size() as _, layout.align() as _) / Size4KiB::SIZE;
+
+            vmalloc::get_vmalloc()
+                .alloc(size as usize)
+                .map(|addr| addr.as_mut_ptr::<u8>())
+                .unwrap_or(core::ptr::null_mut())
         }
     }

     fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        let mut inner = self.inner.lock_irq();
-        let address = ptr as usize;
-
-        if address >= HEAP_START && address < HEAP_END {
-            unsafe {
-                inner
-                    .linked_list_heap
-                    .deallocate(NonNull::new_unchecked(ptr), layout);
-            }
+        let _inner = self.inner.lock_irq();
+        let address = VirtAddr::new(ptr as u64);

+        if address >= vmalloc::VMALLOC_START && address < vmalloc::VMALLOC_END {
+            vmalloc::get_vmalloc().dealloc(address, layout.size() / Size4KiB::SIZE as usize);
             return;
         }


@@ -395,30 +334,7 @@ fn alloc_error_handler(layout: alloc::Layout) -> ! {

 /// Initialize the heap at the [HEAP_START].
 pub fn init_heap() {
-    unsafe {
-        let mut address_space = AddressSpace::this();
-        let mut offset_table = address_space.offset_page_table();
-
-        let frame: PhysFrame = FRAME_ALLOCATOR
-            .allocate_frame()
-            .expect("init_heap: failed to allocate frame for the linked list allocator");
-
-        offset_table
-            .map_to(
-                Page::containing_address(VirtAddr::new(HEAP_START as _)),
-                frame,
-                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-            )
-            .expect("init_heap: failed to initialize the heap")
-            .flush();
-
-        AERO_SYSTEM_ALLOCATOR
-            .0
-            .inner
-            .lock_irq()
-            .linked_list_heap
-            .init(HEAP_START, Size4KiB::SIZE as usize);
-    }
+    vmalloc::init();

     #[cfg(feature = "kmemleak")]
     kmemleak::MEM_LEAK_CATCHER.init();
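For orientation, the sketch below models the shape of the new allocation path in plain, standalone Rust: requests that do not fit a slab are turned into a page count and handed to vmalloc, and dealloc routes a pointer by checking whether it falls inside the vmalloc window. The constants and helpers here are illustrative stand-ins for the kernel's VirtAddr, Size4KiB and align_up, not code from the commit.

    // Illustrative model of the new alloc/dealloc routing in alloc.rs.
    // PAGE_SIZE, VMALLOC_START and VMALLOC_END mirror the kernel constants,
    // but everything below is a plain-integer sketch, not kernel code.
    const PAGE_SIZE: usize = 4096;
    const VMALLOC_START: usize = 0xffff_f800_0000_0000;
    const VMALLOC_END: usize = VMALLOC_START + 128 * 1024 * 1024;

    /// Round `value` up to the next multiple of `align` (a power of two).
    fn align_up(value: usize, align: usize) -> usize {
        (value + align - 1) & !(align - 1)
    }

    /// Page count the allocator forwards to `vmalloc::alloc` for a layout.
    fn pages_for(size: usize, align: usize) -> usize {
        align_up(size, align) / PAGE_SIZE
    }

    /// Range check used by `dealloc` to decide whether a pointer belongs
    /// to the vmalloc region.
    fn is_vmalloc_addr(addr: usize) -> bool {
        (VMALLOC_START..VMALLOC_END).contains(&addr)
    }

    fn main() {
        // A 12 KiB, 8-byte-aligned request maps to 3 pages.
        assert_eq!(pages_for(12 * 1024, 8), 3);
        // An address inside the window is freed through vmalloc ...
        assert!(is_vmalloc_addr(VMALLOC_START + 0x5000));
        // ... anything else falls through to the slab dealloc path.
        assert!(!is_vmalloc_addr(0xffff_8000_0000_0000));
    }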

src/aero_kernel/src/mem/mod.rs
Lines changed: 1 addition & 0 deletions

@@ -20,6 +20,7 @@
 pub mod alloc;
 pub mod paging;
 pub mod pti;
+mod vmalloc;

 use core::alloc::Layout;

src/aero_kernel/src/mem/paging/page.rs
Lines changed: 6 additions & 0 deletions

@@ -118,6 +118,12 @@ impl<S: PageSize> Page<S> {
     pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
         PageRangeInclusive { start, end }
     }
+
+    /// Returns a range of pages, exclusive `end`.
+    #[inline]
+    pub const fn range(start: Self, end: Self) -> PageRange<S> {
+        PageRange { start, end }
+    }
 }

 impl<S: NotGiantPageSize> Page<S> {
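The new `Page::range` mirrors the existing `range_inclusive` but stops before `end`, which is the shape the vmalloc mapping and unmapping loops below need (pages covering [addr, addr + size)). A minimal model of the two behaviours, using raw page indexes instead of the kernel's `Page` type:

    // Plain-integer model of exclusive vs. inclusive page ranges; the kernel's
    // Page::range/range_inclusive wrap the same idea in typed Page values.
    fn page_index(addr: u64) -> u64 {
        addr / 4096
    }

    fn main() {
        let start = 0xffff_f800_0000_0000u64;
        let size = 3 * 4096u64;

        // Exclusive range: pages covering [start, start + size) -> 3 pages.
        let exclusive = page_index(start)..page_index(start + size);
        assert_eq!(exclusive.count(), 3);

        // An inclusive range over the same bounds also yields the page that
        // starts at `start + size`, i.e. one page too many for this use case.
        let inclusive = page_index(start)..=page_index(start + size);
        assert_eq!(inclusive.count(), 4);
    }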

src/aero_kernel/src/mem/vmalloc.rs
Lines changed: 170 additions & 0 deletions

@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2021-2022 The Aero Project Developers.
+ *
+ * This file is part of The Aero Project.
+ *
+ * Aero is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Aero is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Aero. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+//! Due to fragmentation in the buddy frame allocator, we cannot allocate a large
+//! amount of contiguous physical memory. We instead use [`vmalloc`] to allocate
+//! virtually contiguous memory.
+//!
+//! An area is reserved for [`vmalloc`] in the kernel address space, starting at
+//! [`VMALLOC_START`] and ending at [`VMALLOC_END`].
+
+use alloc::collections::LinkedList;
+use spin::Once;
+
+use crate::utils::sync::{Mutex, MutexGuard};
+
+use super::{paging::*, AddressSpace};
+
+pub(super) const VMALLOC_MAX_SIZE: usize = 128 * 1024 * 1024; // 128 MiB
+pub(super) const VMALLOC_START: VirtAddr = VirtAddr::new(0xfffff80000000000);
+pub(super) const VMALLOC_END: VirtAddr =
+    VirtAddr::new(0xfffff80000000000 + VMALLOC_MAX_SIZE as u64);
+
+static VMALLOC: Once<Mutex<Vmalloc>> = Once::new();
+
+struct VmallocArea {
+    addr: VirtAddr,
+    size: usize,
+}
+
+impl VmallocArea {
+    fn new(addr: VirtAddr, size: usize) -> Self {
+        Self { addr, size }
+    }
+}
+
+pub(super) struct Vmalloc {
+    free_list: LinkedList<VmallocArea>,
+}
+
+impl Vmalloc {
+    fn new() -> Self {
+        let mut this = Self {
+            free_list: LinkedList::new(),
+        };
+
+        this.free_list
+            .push_front(VmallocArea::new(VMALLOC_START, VMALLOC_MAX_SIZE));
+        this
+    }
+
+    pub(super) fn alloc(&mut self, mut npages: usize) -> Option<VirtAddr> {
+        npages += 1; // allocate a guard page
+        log::debug!("vmalloc: (npages={npages})");
+
+        let size_bytes = npages * Size4KiB::SIZE as usize;
+
+        let area = self
+            .free_list
+            .iter_mut()
+            .find(|area| area.size >= size_bytes)?;
+
+        let address = area.addr.clone();
+
+        if area.size > size_bytes {
+            area.addr = area.addr + size_bytes;
+            area.size -= size_bytes;
+        } else {
+            // the size of the area is exactly the size we need, so remove it from
+            // the free list.
+            log::warn!("todo: implement this")
+        }
+
+        log::debug!("{:?}", address);
+
+        let mut address_space = AddressSpace::this();
+        let mut offset_table = address_space.offset_page_table();
+
+        let page_range = {
+            let start_page: Page = Page::containing_address(address);
+            let end_page = Page::containing_address(address + size_bytes);
+
+            Page::range(start_page, end_page)
+        };
+
+        // map the pages at the allocated address.
+        for page in page_range {
+            let frame: PhysFrame<Size4KiB> = unsafe {
+                FRAME_ALLOCATOR
+                    .allocate_frame()
+                    .expect("vmalloc: physical memory exhausted")
+            };
+
+            unsafe {
+                offset_table.map_to(
+                    page,
+                    frame,
+                    PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+                )
+            }
+            .unwrap()
+            .flush();
+        }
+
+        Some(address)
+    }
+
+    pub(super) fn dealloc(&mut self, addr: VirtAddr, mut npages: usize) {
+        npages += 1; // deallocate the vmalloc guard page
+
+        log::debug!("vmdealloc: (npages={npages})");
+
+        let size = npages * Size4KiB::SIZE as usize;
+
+        // check if this block can be merged into another block.
+        let merge = self
+            .free_list
+            .iter_mut()
+            .find(|area| addr + size == area.addr);
+
+        if let Some(merge) = merge {
+            merge.addr = addr;
+            merge.size += size;
+        } else {
+            // the block cannot be merged, so add it to the free list.
+            self.free_list.push_back(VmallocArea::new(addr, size));
+        }
+
+        let mut address_space = AddressSpace::this();
+        let mut offset_table = address_space.offset_page_table();
+
+        let page_range = {
+            let start_page: Page = Page::containing_address(addr);
+            let end_page = Page::containing_address(addr + size);
+
+            Page::range(start_page, end_page)
+        };
+
+        for page in page_range {
+            // unmap the page at the address which in turn will deallocate
+            // the frame (refcnt == 0).
+            offset_table.unmap(page).unwrap().1.flush();
+        }
+    }
+}
+
+pub fn init() {
+    VMALLOC.call_once(|| Mutex::new(Vmalloc::new()));
+}
+
+/// ## Panics
+/// * If the `vmalloc` allocator is not initialized.
+pub(super) fn get_vmalloc() -> MutexGuard<'static, Vmalloc> {
+    VMALLOC.get().expect("get_vmalloc: not initialized").lock()
+}
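The allocator itself is a first-fit free list over the vmalloc window: it starts with a single area spanning VMALLOC_START..VMALLOC_END, alloc carves the requested pages plus one guard page off the front of the first area that is large enough (and then maps fresh frames behind the returned address), and dealloc unmaps the pages and either merges the block with a free area that starts right after it or pushes it onto the list. Below is a self-contained model of just that bookkeeping (addresses only, no page-table work); the names mirror the commit, but the code is an illustrative sketch, not the kernel implementation.

    // Bookkeeping-only model of Vmalloc's first-fit free list. The constants
    // mirror the commit, but this is an illustrative sketch, not kernel code.
    use std::collections::LinkedList;

    const PAGE: usize = 4096;
    const VMALLOC_START: usize = 0xffff_f800_0000_0000;
    const VMALLOC_MAX_SIZE: usize = 128 * 1024 * 1024;

    struct Area {
        addr: usize,
        size: usize,
    }

    struct Vmalloc {
        free_list: LinkedList<Area>,
    }

    impl Vmalloc {
        fn new() -> Self {
            let mut free_list = LinkedList::new();
            // One free area initially covers the whole vmalloc window.
            free_list.push_front(Area { addr: VMALLOC_START, size: VMALLOC_MAX_SIZE });
            Self { free_list }
        }

        /// First fit: carve `npages` plus one guard page off the front of the
        /// first free area that is large enough.
        fn alloc(&mut self, npages: usize) -> Option<usize> {
            let size = (npages + 1) * PAGE;
            let area = self.free_list.iter_mut().find(|a| a.size >= size)?;
            let addr = area.addr;
            area.addr += size;
            area.size -= size;
            Some(addr)
        }

        /// Return a block: merge it with a free area that starts right after it,
        /// otherwise push it onto the free list as a new entry.
        fn dealloc(&mut self, addr: usize, npages: usize) {
            let size = (npages + 1) * PAGE;
            if let Some(next) = self.free_list.iter_mut().find(|a| addr + size == a.addr) {
                next.addr = addr;
                next.size += size;
            } else {
                self.free_list.push_back(Area { addr, size });
            }
        }
    }

    fn main() {
        let mut v = Vmalloc::new();

        let a = v.alloc(4).unwrap(); // 4 pages + 1 guard page
        let b = v.alloc(2).unwrap(); // carved immediately after the first block
        assert_eq!(a, VMALLOC_START);
        assert_eq!(b, VMALLOC_START + 5 * PAGE);

        v.dealloc(b, 2); // adjacent to the remaining free area -> merged into it
        v.dealloc(a, 4); // now adjacent to the merged area -> merged as well

        let whole = v.free_list.front().unwrap();
        assert_eq!((whole.addr, whole.size), (VMALLOC_START, VMALLOC_MAX_SIZE));
        assert_eq!(v.free_list.len(), 1);
    }

Because merging only looks for a free area that starts immediately after the freed block, freeing blocks back-to-front re-coalesces the whole window, as the final assertions show; freeing in other orders can leave separate entries on the list until their neighbours are freed as well.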
