@@ -12,10 +12,7 @@ use core::{
 use super::{
     debug_assert_eq, debug_assert_ne,
     int::BinInteger,
-    utils::{
-        min_usize, nonnull_slice_from_raw_parts, nonnull_slice_len, nonnull_slice_start,
-        option_nonnull_as_ptr,
-    },
+    utils::{min_usize, nonnull_slice_len, option_nonnull_as_ptr},
 };

 #[doc = svgbobdoc::transform!(
@@ -556,157 +553,6 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
         NonZeroUsize::new(cursor.wrapping_sub(start))
     }

-    /// Extend an existing memory pool by incorporating the specified memory
-    /// block.
-    ///
-    /// Returns the number of incorporated bytes, counted from the beginning of
-    /// `block`.
-    ///
-    /// In the current implementation, this method can coalesce memory pools
-    /// only if the maximum pool size is outside the range of `usize`, i.e.,
-    /// `log2(GRANULARITY) + FLLEN >= usize::BITS`. This is because it does not
-    /// track each pool's size and cannot check whether the resulting pool will
-    /// have a valid size.
-    ///
-    /// # Time Complexity
-    ///
-    /// This method will complete in linear time (`O(block.len())`) because
-    /// it might need to divide the memory block to meet the maximum block size
-    /// requirement (`(GRANULARITY << FLLEN) - GRANULARITY`).
-    ///
-    /// # Examples
-    ///
-    /// ```rust,ignore
-    /// use rlsf::Tlsf;
-    /// use std::{mem::MaybeUninit, ptr::NonNull};
-    ///
-    /// static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit();
-    /// let mut cursor = unsafe { POOL.as_mut_ptr() } as *mut u8;
-    /// let mut remaining_len = 1024;
-    ///
-    /// let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT;
-    /// let pool0_len = unsafe {
-    ///     tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len / 2))
-    /// }.unwrap().get();
-    /// cursor = cursor.wrapping_add(pool0_len);
-    /// remaining_len -= pool0_len;
-    ///
-    /// let pool1_len = unsafe {
-    ///     tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len / 2))
-    /// };
-    /// cursor = cursor.wrapping_add(pool1_len);
-    /// remaining_len -= pool1_len;
-    ///
-    /// let pool2_len = unsafe {
-    ///     tlsf.append_free_block_ptr(nonnull_slice_from_raw_parts(
-    ///         NonNull::new(cursor).unwrap(), remaining_len))
-    /// };
-    /// cursor = cursor.wrapping_add(pool2_len);
-    /// remaining_len -= pool2_len;
-    ///
-    /// // polyfill for <https://github.com/rust-lang/rust/issues/71941>
-    /// fn nonnull_slice_from_raw_parts<T>(ptr: NonNull<T>, len: usize) -> NonNull<[T]> {
-    ///     NonNull::new(std::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)).unwrap()
-    /// }
-    /// ```
-    ///
-    /// # Safety
-    ///
-    /// The memory block will be considered owned by `self`. The memory block
-    /// must outlive `self`.
-    ///
-    /// `block`'s starting address must match an existing memory pool's
-    /// ending address. See the above example for how to obtain one.
-    ///
-    /// # Panics
-    ///
-    /// This method never panics.
-    pub const unsafe fn append_free_block_ptr(&mut self, block: NonNull<[u8]>) -> usize
-    where
-        FLBitmap: ~const BinInteger,
-        SLBitmap: ~const BinInteger,
-    {
-        // Round down the length
-        let start = nonnull_slice_start(block);
-        let len = nonnull_slice_len(block) & !(GRANULARITY - 1);
-
-        if Self::MAX_POOL_SIZE.is_some() {
-            // If `MAX_POOL_SIZE` is `Some(_)`, it's dangerous to coalesce
-            // memory pools of unknown sizes, so fall back to calling
-            // `insert_free_block_ptr_aligned`.
-            let block = nonnull_slice_from_raw_parts(start, len);
-            return self
-                .insert_free_block_ptr_aligned(block)
-                .map(NonZeroUsize::get)
-                .unwrap_or(0);
-        } else if len == 0 {
-            // `block` is so short that `insert_free_block_ptr` will not
-            // even create a sentinel block. We'll corrupt the structure if we
-            // proceed.
-            return 0;
-        }
-
-        let original_start = start.as_ptr();
-        let mut start = original_start;
-        let end = (start as usize).wrapping_add(len);
-
-        // The sentinel block from the preceding memory pool will be
-        // assimilated into `[start..end]`.
-        start = start.wrapping_sub(super::GRANULARITY);
-        let sentinel_block = start as *mut UsedBlockHdr;
-        debug_assert_eq!(
-            (*sentinel_block).common.size,
-            GRANULARITY | SIZE_USED | SIZE_SENTINEL
-        );
-
-        // The adjacent free block (if there's one) from the preceding memory
-        // pool will be assimilated into `[start..end]`.
-        let penultimate_block = (*sentinel_block)
-            .common
-            .prev_phys_block
-            .expect("sentinel block has no `prev_phys_block`");
-        let last_nonassimilated_block;
-        if (penultimate_block.as_ref().size & SIZE_USED) == 0 {
-            let free_block = penultimate_block.cast::<FreeBlockHdr>();
-            let free_block_size = free_block.as_ref().common.size;
-            debug_assert_eq!(
-                free_block_size,
-                free_block.as_ref().common.size & SIZE_SIZE_MASK
-            );
-            self.unlink_free_block(free_block, free_block_size);
-
-            // Assimilation success
-            start = free_block.as_ptr() as *mut u8;
-            last_nonassimilated_block = free_block.as_ref().common.prev_phys_block;
-        } else {
-            // Assimilation failed
-            last_nonassimilated_block = Some(penultimate_block);
-        }
-
-        // Safety: `start` points to a location inside an existing memory pool,
-        // so it's non-null
-        let block = nonnull_slice_from_raw_parts(
-            NonNull::new_unchecked(start),
-            end.wrapping_sub(start as usize),
-        );
-
-        // Create a memory pool
-        let pool_len = self
-            .insert_free_block_ptr_aligned(block)
-            .expect("`pool_size_to_contain_allocation` is an impostor")
-            .get();
-
-        // Link the created pool's first block to the preceding memory pool's
-        // last non-assimilated block to form one continuous memory pool
-        let mut first_block = nonnull_slice_start(block).cast::<FreeBlockHdr>();
-        first_block.as_mut().common.prev_phys_block = last_nonassimilated_block;
-
-        // Exclude the assimilated part from the returned value
-        pool_len - (original_start as usize).wrapping_sub(start as usize)
-    }
-
     /// Create a new memory pool at the location specified by a slice.
     ///
     /// This method does nothing if the given memory block is too small.
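
The removed doc comment's coalescing condition, `log2(GRANULARITY) + FLLEN >= usize::BITS`, can be checked with a few lines of arithmetic. Below is a minimal sketch, assuming rlsf's `GRANULARITY` of `size_of::<usize>() * 4`; the `can_coalesce` helper is illustrative, not part of the crate:

```rust
// Sketch of the coalescing condition from the removed doc comment.
// Assumption: GRANULARITY = size_of::<usize>() * 4, as in rlsf.
const GRANULARITY: usize = core::mem::size_of::<usize>() * 4;

/// `true` if `log2(GRANULARITY) + FLLEN >= usize::BITS`, i.e. the maximum
/// pool size does not fit in `usize` and `append_free_block_ptr` could
/// coalesce pools instead of falling back to separate insertion.
fn can_coalesce(fllen: u32) -> bool {
    // GRANULARITY is a power of two, so trailing_zeros() == log2.
    GRANULARITY.trailing_zeros() + fllen >= usize::BITS
}

fn main() {
    // With `Tlsf<u8, u8, 8, 8>` (the doc example's parameters), the
    // condition fails, so appended blocks become independent pools.
    assert!(!can_coalesce(8));
    // The condition only holds for very large first-level lengths.
    assert!(can_coalesce(usize::BITS - GRANULARITY.trailing_zeros()));
}
```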
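Similarly, the time-complexity note hinges on the maximum block size `(GRANULARITY << FLLEN) - GRANULARITY`: a pool longer than this must be split into multiple top-level blocks, which is what makes insertion `O(block.len())`. A hedged sketch of that arithmetic (same `GRANULARITY` assumption as above; `max_block_size` is not a crate API):

```rust
const GRANULARITY: usize = core::mem::size_of::<usize>() * 4;

/// Maximum size of a single free block, per the removed doc comment:
/// `(GRANULARITY << FLLEN) - GRANULARITY`.
fn max_block_size(fllen: u32) -> usize {
    (GRANULARITY << fllen) - GRANULARITY
}

fn main() {
    let max = max_block_size(8);
    // (GRANULARITY << 8) - GRANULARITY == GRANULARITY * 255
    assert_eq!(max, GRANULARITY * 255);
    // A pool of `len` bytes needs roughly `len / max` top-level blocks,
    // hence the linear bound on insertion time.
    let len = 1usize << 20;
    println!("max block: {} bytes, ~{} blocks for {} bytes", max, len / max + 1, len);
}
```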
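With `append_free_block_ptr` removed, additional memory can still be registered as a separate pool via `insert_free_block_ptr`, as in the first step of the removed doc example. A minimal self-contained sketch under that assumption, reusing the example's polyfill for rust-lang/rust#71941 (the pool size and type parameters are illustrative):

```rust
use rlsf::Tlsf;
use std::{mem::MaybeUninit, ptr::NonNull};

// polyfill for <https://github.com/rust-lang/rust/issues/71941>
fn nonnull_slice_from_raw_parts<T>(ptr: NonNull<T>, len: usize) -> NonNull<[T]> {
    NonNull::new(std::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)).unwrap()
}

static mut POOL: MaybeUninit<[u8; 1024]> = MaybeUninit::uninit();

fn main() {
    let mut tlsf: Tlsf<u8, u8, 8, 8> = Tlsf::INIT;
    // Each call registers an independent pool; unlike the removed
    // `append_free_block_ptr`, adjacent pools are not coalesced.
    let pool_len = unsafe {
        tlsf.insert_free_block_ptr(nonnull_slice_from_raw_parts(
            NonNull::new(POOL.as_mut_ptr() as *mut u8).unwrap(),
            1024,
        ))
    }
    .map(|n| n.get())
    .unwrap_or(0);
    assert!(pool_len > 0);
}
```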