
Commit 234ca0c

Merge deallocated pages into contiguously-adjacent chunks (theseus-os#975)
* The page allocator will only attempt to merge a deallocated page range with *one* adjacent chunk. It will not look at *both* the previous and next potentially-contiguous chunks, nor will it eagerly iterate over all possible contiguous adjacent chunks that can be merged.
* This makes deallocation fast at the cost of more fragmentation, which is addressed lazily upon exhaustion of the virtual address space. De-fragmentation could also be performed during idle periods in the future. A simplified sketch of this policy follows the commit metadata below.
1 parent 8412048 commit 234ca0c
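
To make the trade-off concrete, here is a minimal sketch of the "merge with at most one adjacent chunk" policy. It models the free list as a plain `Vec` of page ranges rather than the allocator's actual array/RB-tree structure, and the `deallocate` helper and page numbers are purely illustrative, not Theseus APIs:

```rust
use std::ops::Range;

/// Free a range of pages, merging it with at most *one* adjacent free chunk.
/// This is a hypothetical, simplified model of the policy: the real allocator
/// keeps its chunks in a static array or an RB-tree, not a `Vec`.
fn deallocate(free_list: &mut Vec<Range<usize>>, pages: Range<usize>) {
    // First, try to prepend onto a chunk that starts exactly where `pages` ends.
    if let Some(next) = free_list.iter_mut().find(|c| c.start == pages.end) {
        next.start = pages.start;
        return;
    }
    // Otherwise, try to append onto a chunk that ends exactly where `pages` starts.
    if let Some(prev) = free_list.iter_mut().find(|c| c.end == pages.start) {
        prev.end = pages.end;
        return;
    }
    // No contiguous neighbor: record a brand-new free chunk.
    free_list.push(pages);
}

fn main() {
    // Two free chunks with a hole between them.
    let mut free_list = vec![0..10, 20..30];
    // Freeing the hole merges with only *one* neighbor (the next chunk),
    // so two chunks remain rather than one maximally-merged `0..30` chunk.
    deallocate(&mut free_list, 10..20);
    assert_eq!(free_list, vec![0..10, 10..30]);
}
```

Freeing the middle range merges with only one neighbor, so two chunks remain; a later exhaustion-triggered pass (or a future idle-time defragmenter) would be needed to coalesce them into a single `0..30` chunk.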

2 files changed (+52, -21 lines)

kernel/page_allocator/src/lib.rs

Lines changed: 50 additions & 19 deletions
```diff
@@ -9,10 +9,12 @@
 //! The core allocation function is [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html),
 //! but there are several convenience functions that offer simpler interfaces for general usage.
 //!
-//! # Notes and Missing Features
-//! This allocator currently does **not** merge freed chunks (de-fragmentation) upon deallocation.
-//! It only merges free chunks lazily upon request, i.e., when we run out of address space
-//! or when a requested address is in a chunk that needs to be merged with a nearby chunk.
+//! # Notes
+//! This allocator only makes one attempt to merge deallocated pages into existing
+//! free chunks for de-fragmentation. It does not iteratively merge adjacent chunks in order to
+//! maximally combine separate chunks into the biggest single chunk.
+//! Instead, free chunks are lazily merged only when running out of address space
+//! or when needed to fulfill a specific request.
 
 #![no_std]
 
@@ -25,11 +27,9 @@ extern crate spin;
 extern crate intrusive_collections;
 use intrusive_collections::Bound;
 
-
 mod static_array_rb_tree;
 // mod static_array_linked_list;
 
-
 use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}};
 use kernel_config::memory::*;
 use memory_structs::{VirtualAddress, Page, PageRange};
@@ -299,21 +299,52 @@ impl Drop for AllocatedPages {
         if self.size_in_pages() == 0 { return; }
         // trace!("page_allocator: deallocating {:?}", self);
 
-        // Simply add the newly-deallocated chunk to the free pages list.
-        let mut locked_list = FREE_PAGE_LIST.lock();
-        let res = locked_list.insert(Chunk {
+        let chunk = Chunk {
             pages: self.pages.clone(),
-        });
-        match res {
-            Ok(_inserted_free_chunk) => (),
-            Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free page list", c),
+        };
+        let mut list = FREE_PAGE_LIST.lock();
+        match &mut list.0 {
+            // For early allocations, just add the deallocated chunk to the free pages list.
+            Inner::Array(_) => {
+                if list.insert(chunk).is_ok() {
+                    return;
+                }
+            }
+
+            // For full-fledged deallocations, use the entry API to efficiently determine if
+            // we can merge the deallocated pages with an existing contiguously-adjactent chunk
+            // or if we need to insert a new chunk.
+            Inner::RBTree(ref mut tree) => {
+                let mut cursor_mut = tree.lower_bound_mut(Bound::Included(chunk.start()));
+                if let Some(next_chunk) = cursor_mut.get() {
+                    if *chunk.end() + 1 == *next_chunk.start() {
+                        // trace!("Prepending {:?} onto beg of next {:?}", chunk, next_chunk.deref());
+                        if cursor_mut.replace_with(Wrapper::new_link(Chunk {
+                            pages: PageRange::new(*chunk.start(), *next_chunk.end()),
+                        })).is_ok() {
+                            return;
+                        }
+                    }
+                }
+                if let Some(prev_chunk) = cursor_mut.peek_prev().get() {
+                    if *prev_chunk.end() + 1 == *chunk.start() {
+                        // trace!("Appending {:?} onto end of prev {:?}", chunk, prev_chunk.deref());
+                        let new_page_range = PageRange::new(*prev_chunk.start(), *chunk.end());
+                        cursor_mut.move_prev();
+                        if cursor_mut.replace_with(Wrapper::new_link(Chunk {
+                            pages: new_page_range,
+                        })).is_ok() {
+                            return;
+                        }
+                    }
+                }
+
+                // trace!("Inserting new chunk for deallocated {:?} ", chunk.pages);
+                cursor_mut.insert(Wrapper::new_link(chunk));
+                return;
+            }
         }
-
-        // Here, we could optionally use above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks
-        // before or after the newly-inserted free chunk.
-        // However, there's no *need* to do so until we actually run out of address space or until
-        // a requested address is in a chunk that needs to be merged.
-        // Thus, for performance, we save that for those future situations.
+        log::error!("BUG: couldn't insert deallocated {:?} into free page list", self.pages);
     }
 }
 
```
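
For readers following the new `Drop` logic above: it moves an RB-tree cursor to the lower bound of the deallocated chunk's start, tries to prepend onto the next chunk, then tries to append onto the previous one, and only otherwise inserts a fresh chunk. Below is a rough, hedged equivalent using `std::collections::BTreeMap` instead of the kernel's intrusive RB-tree; the `FreeList` alias, `deallocate` function, and page numbers are illustrative assumptions, not Theseus APIs:

```rust
use std::collections::BTreeMap;

/// Free list keyed by each chunk's starting page, mapping to its exclusive end page.
/// This stands in for the kernel's intrusive RB-tree of `Chunk`s.
type FreeList = BTreeMap<usize, usize>;

/// Deallocate the page range `[start, end)`, merging it with at most one
/// contiguously-adjacent free chunk, in the same order as the `Drop` impl above:
/// try the next chunk first, then the previous one, otherwise insert a new chunk.
fn deallocate(free: &mut FreeList, start: usize, end: usize) {
    // "Lower bound": the first existing chunk whose start is >= `start`.
    let next = free.range(start..).next().map(|(&s, &e)| (s, e));
    if let Some((next_start, next_end)) = next {
        if next_start == end {
            // Prepend: the freed range extends the next chunk backwards.
            free.remove(&next_start);
            free.insert(start, next_end);
            return;
        }
    }
    // The previous chunk: the last existing chunk whose start is < `start`.
    if let Some((_, prev_end)) = free.range_mut(..start).next_back() {
        if *prev_end == start {
            // Append: the freed range extends the previous chunk forwards.
            *prev_end = end;
            return;
        }
    }
    // No contiguous neighbor found; insert a brand-new free chunk.
    free.insert(start, end);
}

fn main() {
    let mut free = FreeList::new();
    deallocate(&mut free, 40, 50);
    deallocate(&mut free, 10, 20);
    deallocate(&mut free, 20, 30); // appends onto [10, 20) -> [10, 30)
    deallocate(&mut free, 35, 40); // prepends onto [40, 50) -> [35, 50)
    assert_eq!(free.into_iter().collect::<Vec<_>>(), vec![(10, 30), (35, 50)]);
}
```

Because only one neighbor is examined, at most one merge happens per deallocation, which keeps `Drop` cheap; full coalescing is deferred, as described in the updated module docs.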

kernel/page_allocator/src/static_array_rb_tree.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -89,7 +89,7 @@ impl<T: Ord + 'static> StaticArrayRBTree<T> {
     /// If the inner collection is an array, it is pushed onto the back of the array.
     /// If there is no space left in the array, an `Err` containing the given `value` is returned.
     ///
-    /// If success
+    /// Upon success, a reference to the newly-inserted value is returned.
     pub fn insert(&mut self, value: T) -> Result<ValueRefMut<T>, T> {
         match &mut self.0 {
             Inner::Array(arr) => {
@@ -135,7 +135,7 @@
         let mut iter_a = None;
         let mut iter_b = None;
         match &self.0 {
-            Inner::Array(arr) => iter_a = Some(arr.iter().flatten()),
+            Inner::Array(arr) => iter_a = Some(arr.iter().flatten()),
             Inner::RBTree(tree) => iter_b = Some(tree.iter()),
         }
         iter_a.into_iter()
```
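
The `StaticArrayRBTree` touched above is a hybrid collection: a fixed-size array serves early, pre-heap allocations, and an RB-tree takes over once the heap is available, which is why the `Drop` impl in `lib.rs` matches on `Inner::Array` vs. `Inner::RBTree`. Here is a minimal sketch of that pattern using a plain array plus `std::collections::BTreeSet` in place of Theseus's intrusive tree; the `HybridSet` name, capacity of 32, and return types are assumptions for illustration:

```rust
use std::collections::BTreeSet;

/// A simplified stand-in for Theseus's `StaticArrayRBTree`: a collection that
/// begins life as a fixed-size array (usable before the heap exists) and can
/// later be backed by an ordered tree.
enum HybridSet<T: Ord> {
    Array([Option<T>; 32]),
    Tree(BTreeSet<T>),
}

impl<T: Ord> HybridSet<T> {
    /// Push `value` into the first empty array slot, or insert it into the tree.
    /// If the array is full, the value is handed back in `Err`, matching the
    /// behavior documented by the `insert` doc comment fixed in this commit
    /// (the real method returns a reference to the inserted value on success).
    fn insert(&mut self, value: T) -> Result<(), T> {
        match self {
            HybridSet::Array(arr) => {
                for slot in arr.iter_mut() {
                    if slot.is_none() {
                        *slot = Some(value);
                        return Ok(());
                    }
                }
                Err(value)
            }
            HybridSet::Tree(tree) => {
                tree.insert(value);
                Ok(())
            }
        }
    }

    /// Iterate over the elements using the same "two optional iterators" trick
    /// as the patched `iter` method: only one of `iter_a`/`iter_b` is ever
    /// `Some`, and chaining their flattened forms yields a single unboxed
    /// iterator type that covers both backing collections.
    fn iter(&self) -> impl Iterator<Item = &T> {
        let mut iter_a = None;
        let mut iter_b = None;
        match self {
            HybridSet::Array(arr) => iter_a = Some(arr.iter().flatten()),
            HybridSet::Tree(tree) => iter_b = Some(tree.iter()),
        }
        iter_a.into_iter().flatten().chain(iter_b.into_iter().flatten())
    }
}

fn main() {
    let mut set: HybridSet<usize> = HybridSet::Array(std::array::from_fn(|_| None));
    assert!(set.insert(3).is_ok());
    assert!(set.insert(1).is_ok());
    assert_eq!(set.iter().copied().collect::<Vec<_>>(), vec![3, 1]);
}
```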
