
Commit 054840b

refactor(utils): remove Tlsf::append_free_block_ptr to reduce the porting effort
1 parent 4eb104a commit 054840b

3 files changed (+8, -324 lines)

src/r3_core/src/utils/alloc/rlsf/flex.rs

Lines changed: 2 additions & 86 deletions
@@ -376,92 +376,8 @@ impl<
         // The sentinel block + the block to store the allocation
         debug_assert!(extra_bytes_well_aligned >= GRANULARITY * 2);
 
-        if let (Some(growable_pool), true) = (self.growable_pool, use_growable_pool) {
-            // Try to extend an existing memory pool first.
-            let new_pool_len_desired =
-                const_try!(growable_pool.pool_len.checked_add(extra_bytes_well_aligned));
-
-            // The following assertion should not trip because...
-            //  - `extra_bytes_well_aligned` returns a value that is at least
-            //    as large as `GRANULARITY * 2`.
-            //  - `growable_pool.alloc_len - growable_pool.pool_len` must be
-            //    less than `GRANULARITY * 2` because of
-            //    `insert_free_block_ptr`'s implementation.
-            debug_assert!(new_pool_len_desired >= growable_pool.alloc_len);
-
-            // Safety: `new_pool_end_desired >= growable_pool.alloc_len`, and
-            // `(growable_pool.alloc_start, growable_pool.alloc_len)`
-            // represents a previous allocation.
-            if let Some(new_alloc_len) = unsafe {
-                self.source.realloc_inplace_grow(
-                    nonnull_slice_from_raw_parts(
-                        growable_pool.alloc_start,
-                        growable_pool.alloc_len,
-                    ),
-                    new_pool_len_desired,
-                )
-            } {
-                if self.source.supports_dealloc() {
-                    // Move `PoolFtr`. Note that `PoolFtr::alloc_start` is
-                    // still uninitialized because this allocation is still in
-                    // `self.growable_pool`, so we only have to move
-                    // `PoolFtr::prev_alloc_end`.
-                    let old_pool_ftr = PoolFtr::get_for_alloc(
-                        nonnull_slice_from_raw_parts(
-                            growable_pool.alloc_start,
-                            growable_pool.alloc_len,
-                        ),
-                        self.source.min_align(),
-                    );
-                    let new_pool_ftr = PoolFtr::get_for_alloc(
-                        nonnull_slice_from_raw_parts(growable_pool.alloc_start, new_alloc_len),
-                        self.source.min_align(),
-                    );
-                    // Safety: Both `*new_pool_ftr` and `*old_pool_ftr`
-                    // represent pool footers we control
-                    unsafe { *new_pool_ftr = *old_pool_ftr };
-                }
-
-                let num_appended_len = unsafe {
-                    // Safety: `self.source` allocated some memory after
-                    // `alloc_start + pool_len`, so it shouldn't be
-                    // null
-                    let append_start = NonNull::new_unchecked(
-                        growable_pool
-                            .alloc_start
-                            .as_ptr()
-                            .wrapping_add(growable_pool.pool_len),
-                    );
-                    // Safety: `append_start` follows an existing memory pool,
-                    // and the contained bytes are owned by us
-                    self.tlsf
-                        .append_free_block_ptr(nonnull_slice_from_raw_parts(
-                            append_start,
-                            new_alloc_len - growable_pool.pool_len,
-                        ))
-                };
-
-                // This assumption is based on `extra_bytes_well_aligned`'s
-                // implementation. The `debug_assert!` above depends on this.
-                debug_assert!(
-                    (growable_pool.pool_len + num_appended_len) - new_alloc_len < GRANULARITY * 2
-                );
-
-                self.growable_pool = Some(Pool {
-                    alloc_start: growable_pool.alloc_start,
-                    alloc_len: new_alloc_len,
-                    pool_len: growable_pool.pool_len + num_appended_len,
-                });
-
-                return Some(());
-            } // if let Some(new_alloc_len) = ... realloc_inplace_grow
-
-            if self.source.is_contiguous_growable() {
-                // `is_contiguous_growable`
-                // indicates that `alloc` will also be fruitless because
-                // `realloc_inplace_grow` failed.
-                return None;
-            }
+        if let (Some(_), true) = (self.growable_pool, use_growable_pool) {
+            // Growable pool is not supported in this version
         } // if let Some(growable_pool) = self.growable_pool
 
         // Create a brand new allocation. `source.min_align` indicates the
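
The removed branch relied on two invariants spelled out in its comments: `extra_bytes_well_aligned` is always at least `GRANULARITY * 2`, and `insert_free_block_ptr` leaves fewer than `GRANULARITY * 2` unused bytes between `pool_len` and `alloc_len`. The following standalone sketch works that arithmetic through with hypothetical numbers (`GRANULARITY = 32` and the pool sizes are assumed values for illustration, not taken from the crate) to show why the removed `debug_assert!(new_pool_len_desired >= growable_pool.alloc_len)` could not trip:

```rust
fn main() {
    // Assumed granularity for illustration only.
    const GRANULARITY: usize = 32;

    // Invariant 1 (from the removed comment): the requested extension is at
    // least two granules.
    let extra_bytes_well_aligned = GRANULARITY * 2;

    // Invariant 2 (from the removed comment): the unused tail of the previous
    // allocation is strictly smaller than two granules; take the worst case.
    let pool_len = 1024usize;
    let alloc_len = pool_len + (GRANULARITY * 2 - 1);

    // The quantity the removed code computed with `checked_add`.
    let new_pool_len_desired = pool_len + extra_bytes_well_aligned;

    // Hence the removed `debug_assert!` always held.
    assert!(new_pool_len_desired >= alloc_len);
    println!("new_pool_len_desired = {new_pool_len_desired}, alloc_len = {alloc_len}");
}
```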

src/r3_core/src/utils/alloc/rlsf/tlsf.rs

Lines changed: 1 addition & 155 deletions
@@ -12,10 +12,7 @@ use core::{
 use super::{
     debug_assert_eq, debug_assert_ne,
     int::BinInteger,
-    utils::{
-        min_usize, nonnull_slice_from_raw_parts, nonnull_slice_len, nonnull_slice_start,
-        option_nonnull_as_ptr,
-    },
+    utils::{min_usize, nonnull_slice_len, option_nonnull_as_ptr},
 };
 
 #[doc = svgbobdoc::transform!(
@@ -556,157 +553,6 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
         NonZeroUsize::new(cursor.wrapping_sub(start))
     }
 
-    /// Extend an existing memory pool by incorporating the specified memory
-    /// block.
-    ///
-    /// Returns the number of incorporated bytes, counted from the beginning of
-    /// `block`.
-    ///
-    /// In the current implementation, this method can coalesce memory pools
-    /// only if the maximum pool size is outside the range of `usize`, i.e.,
-    /// `log2(GRANULARITY) + FLLEN >= usize::BITS`. This is because it does not
-    /// track each pool's size and cannot check whether the resulting pool will
-    /// have a valid size.
-    ///
-    /// # Time Complexity
-    ///
-    /// This method will complete in linear time (`O(block.len())`) because
-    /// it might need to divide the memory block to meet the maximum block size
-    /// requirement (`(GRANULARITY << FLLEN) - GRANULARITY`).
-    ///
-    /// # Examples
-    ///
-    /// ```rust,ignore
-    /// use rlsf::Tlsf;
-    /// use std::{mem::MaybeUninit, ptr::NonNull};
-    ///
-    /// static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit();
-    /// let mut cursor = unsafe { POOL.as_mut_ptr() } as *mut u8;
-    /// let mut remaining_len = 1024;
-    ///
-    /// let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT;
-    /// let pool0_len = unsafe {
-    ///     tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len / 2))
-    /// }.unwrap().get();
-    /// cursor = cursor.wrapping_add(pool0_len);
-    /// remaining_len -= pool0_len;
-    ///
-    /// let pool1_len = unsafe {
-    ///     tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len / 2))
-    /// };
-    /// cursor = cursor.wrapping_add(pool1_len);
-    /// remaining_len -= pool1_len;
-    ///
-    /// let pool2_len = unsafe {
-    ///     tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len))
-    /// };
-    /// cursor = cursor.wrapping_add(pool2_len);
-    /// remaining_len -= pool2_len;
-    ///
-    /// // polyfill for <https://github.com/rust-lang/rust/issues/71941>
-    /// fn nonnull_slice_from_raw_parts<T>(ptr: NonNull<T>, len: usize) -> NonNull<[T]> {
-    ///     NonNull::new(std::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)).unwrap()
-    /// }
-    /// ```
-    ///
-    /// # Safety
-    ///
-    /// The memory block will be considered owned by `self`. The memory block
-    /// must outlive `self`.
-    ///
-    /// `block`'s starting address must match an existing memory pool's
-    /// ending address. See the above example for how to obtain one.
-    ///
-    /// # Panics
-    ///
-    /// This method never panics.
-    pub const unsafe fn append_free_block_ptr(&mut self, block: NonNull<[u8]>) -> usize
-    where
-        FLBitmap: ~const BinInteger,
-        SLBitmap: ~const BinInteger,
-    {
-        // Round down the length
-        let start = nonnull_slice_start(block);
-        let len = nonnull_slice_len(block) & !(GRANULARITY - 1);
-
-        if Self::MAX_POOL_SIZE.is_some() {
-            // If `MAX_POOL_SIZE` is `Some(_)`, it's dangerous to coalesce
-            // memory pools of unknown sizes, so fall back to calling
-            // `insert_free_block_ptr_aligned`.
-            let block = nonnull_slice_from_raw_parts(start, len);
-            return self
-                .insert_free_block_ptr_aligned(block)
-                .map(NonZeroUsize::get)
-                .unwrap_or(0);
-        } else if len == 0 {
-            // `block` is so short that the `insert_free_block_ptr` will not
-            // even create a sentinel block. We'll corrupt the structure if we
-            // proceed.
-            return 0;
-        }
-
-        let original_start = start.as_ptr();
-        let mut start = original_start;
-        let end = (start as usize).wrapping_add(len);
-
-        // The sentinel block from the preceding memory pool will be
-        // assimilated into `[start..end]`.
-        start = start.wrapping_sub(super::GRANULARITY);
-        let sentinel_block = start as *mut UsedBlockHdr;
-        debug_assert_eq!(
-            (*sentinel_block).common.size,
-            GRANULARITY | SIZE_USED | SIZE_SENTINEL
-        );
-
-        // The adjacent free block (if there's one) from the preceding memory
-        // pool will be assimilated into `[start..end]`.
-        let penultimate_block = (*sentinel_block)
-            .common
-            .prev_phys_block
-            .expect("sentinel block has no `prev_phys_block`");
-        let last_nonassimilated_block;
-        if (penultimate_block.as_ref().size & SIZE_USED) == 0 {
-            let free_block = penultimate_block.cast::<FreeBlockHdr>();
-            let free_block_size = free_block.as_ref().common.size;
-            debug_assert_eq!(
-                free_block_size,
-                free_block.as_ref().common.size & SIZE_SIZE_MASK
-            );
-            self.unlink_free_block(free_block, free_block_size);
-
-            // Assimilation success
-            start = free_block.as_ptr() as *mut u8;
-            last_nonassimilated_block = free_block.as_ref().common.prev_phys_block;
-        } else {
-            // Assimilation failed
-            last_nonassimilated_block = Some(penultimate_block);
-        }
-
-        // Safety: `start` points to a location inside an existion memory pool,
-        // so it's non-null
-        let block = nonnull_slice_from_raw_parts(
-            NonNull::new_unchecked(start),
-            end.wrapping_sub(start as usize),
-        );
-
-        // Create a memory pool
-        let pool_len = self
-            .insert_free_block_ptr_aligned(block)
-            .expect("`pool_size_to_contain_allocation` is an impostor")
-            .get();
-
-        // Link the created pool's first block to the preceding memory pool's
-        // last non-assimilated block to form one continuous memory pool
-        let mut first_block = nonnull_slice_start(block).cast::<FreeBlockHdr>();
-        first_block.as_mut().common.prev_phys_block = last_nonassimilated_block;
-
-        // Exclude the assimilated part from the returned value
-        pool_len - (original_start as usize).wrapping_sub(start as usize)
-    }
-
     /// Create a new memory pool at the location specified by a slice.
     ///
     /// This method does nothing if the given memory block is too small.
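
With `append_free_block_ptr` gone, code that used to stitch an adjacent region onto an existing pool can register each region as an independent pool instead. The sketch below is adapted from the removed doc example above; like that example (which was marked `rust,ignore`), it is illustrative rather than a guaranteed-compiling doctest, and the `Tlsf<u8, u8, 8, 8>` parameters, `Tlsf::INIT`, and the `nonnull_slice_from_raw_parts` polyfill are carried over from it as-is.

```rust
use rlsf::Tlsf;
use std::{mem::MaybeUninit, ptr::NonNull};

fn main() {
    static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit();
    let mut cursor = unsafe { POOL.as_mut_ptr() } as *mut u8;
    let mut remaining_len = 1024;

    let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT;

    // The first half becomes one pool.
    let pool0_len = unsafe {
        tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts(
            NonNull::new(cursor).unwrap(), remaining_len / 2))
    }.map(|n| n.get()).unwrap_or(0);
    cursor = cursor.wrapping_add(pool0_len);
    remaining_len -= pool0_len;

    // The remainder becomes a second, separate pool. Unlike with the removed
    // `append_free_block_ptr`, the two pools are not coalesced into one
    // contiguous pool; each keeps its own sentinel block.
    let _pool1_len = unsafe {
        tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts(
            NonNull::new(cursor).unwrap(), remaining_len))
    }.map(|n| n.get()).unwrap_or(0);
}

// polyfill for <https://github.com/rust-lang/rust/issues/71941>
fn nonnull_slice_from_raw_parts<T>(ptr: NonNull<T>, len: usize) -> NonNull<[T]> {
    NonNull::new(std::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)).unwrap()
}
```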
