//! The core allocation function is [`allocate_pages_deferred()`](fn.allocate_pages_deferred.html),
//! but there are several convenience functions that offer simpler interfaces for general usage.
//!
- //! # Notes and Missing Features
- //! This allocator currently does **not** merge freed chunks (de-fragmentation) upon deallocation.
- //! It only merges free chunks lazily upon request, i.e., when we run out of address space
- //! or when a requested address is in a chunk that needs to be merged with a nearby chunk.
+ //! # Notes
+ //! This allocator only makes one attempt to merge deallocated pages into existing
+ //! free chunks for de-fragmentation. It does not iteratively merge adjacent chunks in order to
+ //! maximally combine separate chunks into the biggest single chunk.
+ //! Instead, free chunks are lazily merged only when running out of address space
+ //! or when needed to fulfill a specific request.

#![no_std]
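To make the policy described in the doc comment above concrete, here is a minimal, self-contained sketch of merge-on-deallocation. It uses `std::collections::BTreeMap` as a stand-in for the crate's intrusive red-black tree; the `free_list` map, the `deallocate` helper, and the page numbers are illustrative only and are not part of this crate's API.

```rust
use std::collections::BTreeMap;

/// Deallocates the inclusive page range `[start, end]` into `free_list`
/// (a map of `start_page -> end_page`), making at most ONE attempt to merge
/// with an adjacent free chunk, mirroring the policy described above.
fn deallocate(free_list: &mut BTreeMap<usize, usize>, start: usize, end: usize) {
    // Copy out the first free chunk at or after `start` (the "next" chunk).
    let next = free_list.range(start..).next().map(|(&s, &e)| (s, e));
    if let Some((next_start, next_end)) = next {
        if end + 1 == next_start {
            // Prepend: the freed range ends right where the next chunk begins.
            free_list.remove(&next_start);
            free_list.insert(start, next_end);
            return;
        }
    }
    // Copy out the last free chunk before `start` (the "previous" chunk).
    let prev = free_list.range(..start).next_back().map(|(&s, &e)| (s, e));
    if let Some((prev_start, prev_end)) = prev {
        if prev_end + 1 == start {
            // Append: the freed range begins right where the previous chunk ends.
            free_list.insert(prev_start, end);
            return;
        }
    }
    // No adjacent free chunk: record the freed range as a new chunk.
    free_list.insert(start, end);
}

fn main() {
    let mut free_list = BTreeMap::from([(0, 9), (20, 29)]);
    // Freeing [10, 19] is adjacent to both existing chunks, but only one merge
    // is attempted: it is prepended onto [20, 29]. The result is two chunks,
    // [0, 9] and [10, 29], not one fully coalesced chunk [0, 29].
    deallocate(&mut free_list, 10, 19);
    assert_eq!(free_list, BTreeMap::from([(0, 9), (10, 29)]));
}
```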
@@ -25,11 +27,9 @@ extern crate spin;
extern crate intrusive_collections;
use intrusive_collections::Bound;
-
mod static_array_rb_tree;
// mod static_array_linked_list;
-
use core::{borrow::Borrow, cmp::{Ordering, max, min}, fmt, ops::{Deref, DerefMut}};
use kernel_config::memory::*;
use memory_structs::{VirtualAddress, Page, PageRange};
@@ -299,21 +299,52 @@ impl Drop for AllocatedPages {
        if self.size_in_pages() == 0 { return; }
        // trace!("page_allocator: deallocating {:?}", self);

-         // Simply add the newly-deallocated chunk to the free pages list.
-         let mut locked_list = FREE_PAGE_LIST.lock();
-         let res = locked_list.insert(Chunk {
+         let chunk = Chunk {
            pages: self.pages.clone(),
-         });
-         match res {
-             Ok(_inserted_free_chunk) => (),
-             Err(c) => error!("BUG: couldn't insert deallocated chunk {:?} into free page list", c),
+         };
+         let mut list = FREE_PAGE_LIST.lock();
+         match &mut list.0 {
+             // For early allocations, just add the deallocated chunk to the free pages list.
+             Inner::Array(_) => {
+                 if list.insert(chunk).is_ok() {
+                     return;
+                 }
+             }
+
+             // For full-fledged deallocations, use the entry API to efficiently determine if
+             // we can merge the deallocated pages with an existing contiguously-adjacent chunk
+             // or if we need to insert a new chunk.
+             Inner::RBTree(ref mut tree) => {
+                 let mut cursor_mut = tree.lower_bound_mut(Bound::Included(chunk.start()));
+                 if let Some(next_chunk) = cursor_mut.get() {
+                     if *chunk.end() + 1 == *next_chunk.start() {
+                         // trace!("Prepending {:?} onto beg of next {:?}", chunk, next_chunk.deref());
+                         if cursor_mut.replace_with(Wrapper::new_link(Chunk {
+                             pages: PageRange::new(*chunk.start(), *next_chunk.end()),
+                         })).is_ok() {
+                             return;
+                         }
+                     }
+                 }
+                 if let Some(prev_chunk) = cursor_mut.peek_prev().get() {
+                     if *prev_chunk.end() + 1 == *chunk.start() {
+                         // trace!("Appending {:?} onto end of prev {:?}", chunk, prev_chunk.deref());
+                         let new_page_range = PageRange::new(*prev_chunk.start(), *chunk.end());
+                         cursor_mut.move_prev();
+                         if cursor_mut.replace_with(Wrapper::new_link(Chunk {
+                             pages: new_page_range,
+                         })).is_ok() {
+                             return;
+                         }
+                     }
+                 }
+
+                 // trace!("Inserting new chunk for deallocated {:?}", chunk.pages);
+                 cursor_mut.insert(Wrapper::new_link(chunk));
+                 return;
+             }
        }
-
-         // Here, we could optionally use the above `_inserted_free_chunk` to merge the adjacent (contiguous) chunks
-         // before or after the newly-inserted free chunk.
-         // However, there's no *need* to do so until we actually run out of address space or until
-         // a requested address is in a chunk that needs to be merged.
-         // Thus, for performance, we save that for those future situations.
+         log::error!("BUG: couldn't insert deallocated {:?} into free page list", self.pages);
    }
}
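For reference, the cursor pattern used in the `Inner::RBTree` arm above can be exercised in isolation with the `intrusive_collections` crate. The sketch below is a simplified stand-in, not this crate's actual code: `FreeChunk`, `FreeChunkAdapter`, `new_chunk`, and the page numbers are made up for illustration, plain `start`/`end` fields replace the real `Chunk`/`PageRange` types, and it runs as an ordinary std binary with `intrusive_collections` (~0.9) as a dependency rather than under `no_std`. It shows the same three steps: position a cursor at the lower bound of the freed range's start, try to merge with the next chunk, then with the previous one, and otherwise insert a new node.

```rust
use intrusive_collections::{intrusive_adapter, Bound, KeyAdapter, RBTree, RBTreeLink};

struct FreeChunk {
    link: RBTreeLink,
    start: usize, // first free page (inclusive)
    end: usize,   // last free page (inclusive)
}

// Tree nodes are heap-allocated and keyed by their starting page.
intrusive_adapter!(FreeChunkAdapter = Box<FreeChunk>: FreeChunk { link: RBTreeLink });
impl<'a> KeyAdapter<'a> for FreeChunkAdapter {
    type Key = usize;
    fn get_key(&self, chunk: &'a FreeChunk) -> usize { chunk.start }
}

fn new_chunk(start: usize, end: usize) -> Box<FreeChunk> {
    Box::new(FreeChunk { link: RBTreeLink::new(), start, end })
}

fn deallocate(tree: &mut RBTree<FreeChunkAdapter>, start: usize, end: usize) {
    // Position the cursor at the first free chunk whose start is >= `start`.
    let mut cursor = tree.lower_bound_mut(Bound::Included(&start));
    // Case 1: the freed range ends immediately before that "next" chunk begins.
    if let Some(next) = cursor.get() {
        if end + 1 == next.start {
            let merged = new_chunk(start, next.end);
            if cursor.replace_with(merged).is_ok() {
                return;
            }
        }
    }
    // Case 2: the freed range begins immediately after the "previous" chunk ends.
    if let Some(prev) = cursor.peek_prev().get() {
        if prev.end + 1 == start {
            let merged = new_chunk(prev.start, end);
            cursor.move_prev();
            if cursor.replace_with(merged).is_ok() {
                return;
            }
        }
    }
    // Case 3: no adjacent chunk; insert the freed range as a new node.
    cursor.insert(new_chunk(start, end));
}

fn main() {
    let mut tree = RBTree::new(FreeChunkAdapter::new());
    tree.insert(new_chunk(0, 9));
    tree.insert(new_chunk(20, 29));

    deallocate(&mut tree, 10, 19); // prepends onto [20, 29], yielding [10, 29]

    let chunks: Vec<(usize, usize)> = tree.iter().map(|c| (c.start, c.end)).collect();
    assert_eq!(chunks, vec![(0, 9), (10, 29)]);
}
```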