1
1
use crate :: prelude:: * ;
2
2
use alloc:: collections:: BTreeMap ;
3
- use core:: cmp;
4
3
use core:: { alloc:: Layout , num:: NonZeroU32 , ops:: Bound } ;
5
4
6
5
/// A very simple first-fit free list for use by our garbage collectors.
7
6
pub ( crate ) struct FreeList {
8
7
/// The total capacity of the contiguous range of memory we are managing.
8
+ ///
9
+ /// NB: we keep `self.capacity` unrounded because otherwise we would get
10
+ /// rounding errors where we lose track of the actual capacity we have when
11
+ /// repeatedly adding capacity `n` where `n < ALIGN`:
12
+ ///
13
+ /// ```ignore
14
+ /// let mut free_list = FreeList::new(0);
15
+ /// loop {
16
+ /// free_list.add_capacity(1);
17
+ /// }
18
+ /// ```
19
+ ///
20
+ /// If we eagerly rounded capacity down to our alignment on every call to
21
+ /// `add_capacity`, the free list would always think it has zero capacity,
22
+ /// even though it would have enough capacity for many allocations after
23
+ /// enough iterations of the loop.
9
24
capacity : usize ,
10
25
/// Our free blocks, as a map from index to length of the free block at that
11
26
/// index.
@@ -28,7 +43,7 @@ impl FreeList {
28
43
/// Create a new `FreeList` for a contiguous region of memory of the given
29
44
/// size.
30
45
pub fn new ( capacity : usize ) -> Self {
31
- log:: trace !( "FreeList::new({capacity})" ) ;
46
+ log:: debug !( "FreeList::new({capacity})" ) ;
32
47
let mut free_list = FreeList {
33
48
capacity,
34
49
free_block_index_to_len : BTreeMap :: new ( ) ,
@@ -37,8 +52,66 @@ impl FreeList {
37
52
free_list
38
53
}
39
54
55
+ /// Add additional capacity to this free list.
56
+ #[ allow( dead_code) ] // TODO: becomes used in https://github.com/bytecodealliance/wasmtime/pull/10503
57
+ pub fn add_capacity ( & mut self , additional : usize ) {
58
+ let old_cap = self . capacity ;
59
+ self . capacity = self . capacity . saturating_add ( additional) ;
60
+ log:: debug!(
61
+ "FreeList::add_capacity({additional:#x}): capacity growing from {old_cap:#x} to {:#x}" ,
62
+ self . capacity
63
+ ) ;
64
+
65
+ // See the comment on `self.capacity` about why we need to do the
66
+ // alignment-rounding here, rather than keeping `self.capacity` aligned
67
+ // at rest.
68
+ let old_cap_rounded = round_usize_down_to_pow2 ( old_cap, ALIGN_USIZE ) ;
69
+
70
+ // If we are adding capacity beyond what a `u32` can address, then we
71
+ // can't actually use that capacity, so don't bother adding a new block
72
+ // to the free list.
73
+ let Ok ( old_cap_rounded) = u32:: try_from ( old_cap_rounded) else {
74
+ return ;
75
+ } ;
76
+
77
+ // Our new block's index is the end of the old capacity.
78
+ let index = NonZeroU32 :: new ( old_cap_rounded) . unwrap_or (
79
+ // But additionally all indices must be non-zero, so start the new
80
+ // block at the first aligned index if necessary.
81
+ NonZeroU32 :: new ( ALIGN_U32 ) . unwrap ( ) ,
82
+ ) ;
83
+
84
+ // If, after rounding everything to our alignment, we aren't actually
85
+ // gaining any new capacity, then don't add a new block to the free
86
+ // list.
87
+ let new_cap = u32:: try_from ( self . capacity ) . unwrap_or ( u32:: MAX ) ;
88
+ let new_cap = round_u32_down_to_pow2 ( new_cap, ALIGN_U32 ) ;
89
+ debug_assert ! ( new_cap >= index. get( ) ) ;
90
+ let size = new_cap - index. get ( ) ;
91
+ debug_assert_eq ! ( size % ALIGN_U32 , 0 ) ;
92
+ if size == 0 {
93
+ return ;
94
+ }
95
+
96
+ // If we can't represent this block in a `Layout`, then don't add it to
97
+ // our free list either.
98
+ let Ok ( layout) = Layout :: from_size_align ( usize:: try_from ( size) . unwrap ( ) , ALIGN_USIZE )
99
+ else {
100
+ return ;
101
+ } ;
102
+
103
+ // Okay! Add a block to our free list for the new capacity, potentially
104
+ // merging it with existing blocks at the end of the free list.
105
+ log:: trace!(
106
+ "FreeList::add_capacity(..): adding block {index:#x}..{:#x}" ,
107
+ index. get( ) + size
108
+ ) ;
109
+ self . dealloc ( index, layout) ;
110
+ }
111
+
112
/// The largest allocation size this free list could ever satisfy: the
/// `u32`-addressable portion of our capacity, minus one alignment unit
/// (since index zero is never handed out), rounded down to our alignment.
#[cfg(test)]
fn max_size(&self) -> usize {
    let addressable = usize::try_from(u32::MAX).unwrap();
    let cap = self.capacity.min(addressable);
    round_usize_down_to_pow2(cap.saturating_sub(ALIGN_USIZE), ALIGN_USIZE)
}
44
117
@@ -47,21 +120,11 @@ impl FreeList {
47
120
fn check_layout ( & self , layout : Layout ) -> Result < u32 > {
48
121
ensure ! (
49
122
layout. align( ) <= ALIGN_USIZE ,
50
- "requested allocation's alignment of {} is greater than max supported alignment of {ALIGN_USIZE}" ,
123
+ "requested allocation's alignment of {} is greater than max supported \
124
+ alignment of {ALIGN_USIZE}",
51
125
layout. align( ) ,
52
126
) ;
53
127
54
- if layout. size ( ) > self . max_size ( ) {
55
- let trap = crate :: Trap :: AllocationTooLarge ;
56
- let err = anyhow:: Error :: from ( trap) ;
57
- let err = err. context ( format ! (
58
- "requested allocation's size of {} is greater than the max supported size of {}" ,
59
- layout. size( ) ,
60
- self . max_size( ) ,
61
- ) ) ;
62
- return Err ( err) ;
63
- }
64
-
65
128
let alloc_size = u32:: try_from ( layout. size ( ) ) . map_err ( |e| {
66
129
let trap = crate :: Trap :: AllocationTooLarge ;
67
130
let err = anyhow:: Error :: from ( trap) ;
@@ -377,6 +440,8 @@ mod tests {
377
440
#[ test]
378
441
#[ cfg_attr( miri, ignore) ]
379
442
fn check_no_fragmentation( ( capacity, ops) in ops( ) ) {
443
+ let _ = env_logger:: try_init( ) ;
444
+
380
445
// Map from allocation id to ptr.
381
446
let mut live = HashMap :: new( ) ;
382
447
@@ -519,23 +584,14 @@ mod tests {
519
584
fn allocate_no_split ( ) {
520
585
// Create a free list with the capacity to allocate two blocks of size
521
586
// `ALIGN_U32`.
522
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 2 ) ;
587
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 2 ) ;
523
588
524
589
assert_eq ! ( free_list. free_block_index_to_len. len( ) , 1 ) ;
525
- assert_eq ! (
526
- free_list. max_size( ) ,
527
- usize :: try_from( ALIGN_U32 ) . unwrap( ) * 2
528
- ) ;
590
+ assert_eq ! ( free_list. max_size( ) , ALIGN_USIZE * 2 ) ;
529
591
530
592
// Allocate a block such that the remainder is not worth splitting.
531
593
free_list
532
- . alloc (
533
- Layout :: from_size_align (
534
- usize:: try_from ( ALIGN_U32 ) . unwrap ( ) + ALIGN_USIZE ,
535
- ALIGN_USIZE ,
536
- )
537
- . unwrap ( ) ,
538
- )
594
+ . alloc ( Layout :: from_size_align ( ALIGN_USIZE + ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) )
539
595
. expect ( "allocation within 'static' free list limits" )
540
596
. expect ( "have free space available for allocation" ) ;
541
597
@@ -547,23 +603,14 @@ mod tests {
547
603
fn allocate_and_split ( ) {
548
604
// Create a free list with the capacity to allocate three blocks of size
549
605
// `ALIGN_U32`.
550
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 3 ) ;
606
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 3 ) ;
551
607
552
608
assert_eq ! ( free_list. free_block_index_to_len. len( ) , 1 ) ;
553
- assert_eq ! (
554
- free_list. max_size( ) ,
555
- usize :: try_from( ALIGN_U32 ) . unwrap( ) * 3
556
- ) ;
609
+ assert_eq ! ( free_list. max_size( ) , ALIGN_USIZE * 3 ) ;
557
610
558
611
// Allocate a block such that the remainder is not worth splitting.
559
612
free_list
560
- . alloc (
561
- Layout :: from_size_align (
562
- usize:: try_from ( ALIGN_U32 ) . unwrap ( ) + ALIGN_USIZE ,
563
- ALIGN_USIZE ,
564
- )
565
- . unwrap ( ) ,
566
- )
613
+ . alloc ( Layout :: from_size_align ( ALIGN_USIZE + ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) )
567
614
. expect ( "allocation within 'static' free list limits" )
568
615
. expect ( "have free space available for allocation" ) ;
569
616
@@ -573,10 +620,9 @@ mod tests {
573
620
574
621
#[ test]
575
622
fn dealloc_merge_prev_and_next ( ) {
576
- let layout =
577
- Layout :: from_size_align ( usize:: try_from ( ALIGN_U32 ) . unwrap ( ) , ALIGN_USIZE ) . unwrap ( ) ;
623
+ let layout = Layout :: from_size_align ( ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) ;
578
624
579
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 100 ) ;
625
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 100 ) ;
580
626
assert_eq ! (
581
627
free_list. free_block_index_to_len. len( ) ,
582
628
1 ,
@@ -621,10 +667,9 @@ mod tests {
621
667
622
668
#[ test]
623
669
fn dealloc_merge_with_prev_and_not_next ( ) {
624
- let layout =
625
- Layout :: from_size_align ( usize:: try_from ( ALIGN_U32 ) . unwrap ( ) , ALIGN_USIZE ) . unwrap ( ) ;
670
+ let layout = Layout :: from_size_align ( ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) ;
626
671
627
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 100 ) ;
672
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 100 ) ;
628
673
assert_eq ! (
629
674
free_list. free_block_index_to_len. len( ) ,
630
675
1 ,
@@ -669,10 +714,9 @@ mod tests {
669
714
670
715
#[ test]
671
716
fn dealloc_merge_with_next_and_not_prev ( ) {
672
- let layout =
673
- Layout :: from_size_align ( usize:: try_from ( ALIGN_U32 ) . unwrap ( ) , ALIGN_USIZE ) . unwrap ( ) ;
717
+ let layout = Layout :: from_size_align ( ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) ;
674
718
675
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 100 ) ;
719
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 100 ) ;
676
720
assert_eq ! (
677
721
free_list. free_block_index_to_len. len( ) ,
678
722
1 ,
@@ -717,10 +761,9 @@ mod tests {
717
761
718
762
#[ test]
719
763
fn dealloc_no_merge ( ) {
720
- let layout =
721
- Layout :: from_size_align ( usize:: try_from ( ALIGN_U32 ) . unwrap ( ) , ALIGN_USIZE ) . unwrap ( ) ;
764
+ let layout = Layout :: from_size_align ( ALIGN_USIZE , ALIGN_USIZE ) . unwrap ( ) ;
722
765
723
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize :: try_from ( ALIGN_U32 ) . unwrap ( ) * 100 ) ;
766
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 100 ) ;
724
767
assert_eq ! (
725
768
free_list. free_block_index_to_len. len( ) ,
726
769
1 ,
@@ -770,38 +813,27 @@ mod tests {
770
813
#[ test]
771
814
fn alloc_size_too_large ( ) {
772
815
// Free list with room for 10 min-sized blocks.
773
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize:: try_from ( ALIGN_U32 ) . unwrap ( ) * 10 ) ;
774
- assert_eq ! (
775
- free_list. max_size( ) ,
776
- usize :: try_from( ALIGN_U32 ) . unwrap( ) * 10
777
- ) ;
816
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 10 ) ;
817
+ assert_eq ! ( free_list. max_size( ) , ALIGN_USIZE * 10 ) ;
778
818
779
819
// Attempt to allocate something that is 20 times the size of our
780
820
// min-sized block.
781
821
assert ! ( free_list
782
- . alloc(
783
- Layout :: from_size_align( usize :: try_from( ALIGN_U32 ) . unwrap( ) * 20 , ALIGN_USIZE )
784
- . unwrap( ) ,
785
- )
786
- . is_err( ) ) ;
822
+ . alloc( Layout :: from_size_align( ALIGN_USIZE * 20 , ALIGN_USIZE ) . unwrap( ) )
823
+ . unwrap( )
824
+ . is_none( ) ) ;
787
825
}
788
826
789
827
#[ test]
790
828
fn alloc_align_too_large ( ) {
791
829
// Free list with room for 10 min-sized blocks.
792
- let mut free_list = FreeList :: new ( ALIGN_USIZE + usize:: try_from ( ALIGN_U32 ) . unwrap ( ) * 10 ) ;
793
- assert_eq ! (
794
- free_list. max_size( ) ,
795
- usize :: try_from( ALIGN_U32 ) . unwrap( ) * 10
796
- ) ;
830
+ let mut free_list = FreeList :: new ( ALIGN_USIZE + ALIGN_USIZE * 10 ) ;
831
+ assert_eq ! ( free_list. max_size( ) , ALIGN_USIZE * 10 ) ;
797
832
798
833
// Attempt to allocate something that requires larger alignment than
799
834
// `FreeList` supports.
800
835
assert ! ( free_list
801
- . alloc(
802
- Layout :: from_size_align( usize :: try_from( ALIGN_U32 ) . unwrap( ) , ALIGN_USIZE * 2 )
803
- . unwrap( ) ,
804
- )
836
+ . alloc( Layout :: from_size_align( ALIGN_USIZE , ALIGN_USIZE * 2 ) . unwrap( ) , )
805
837
. is_err( ) ) ;
806
838
}
807
839
@@ -834,4 +866,56 @@ mod tests {
834
866
test ( & mut f, l) ;
835
867
}
836
868
}
869
+
870
#[test]
fn add_capacity() {
    // One minimum-sized, minimum-aligned block.
    let block = Layout::from_size_align(ALIGN_USIZE, ALIGN_USIZE).unwrap();

    let mut free_list = FreeList::new(0);
    assert!(free_list.alloc(block).unwrap().is_none(), "no capacity");

    // Growing by exactly one alignment unit is not enough: index zero is
    // never allocated.
    free_list.add_capacity(ALIGN_USIZE);
    let attempt = free_list.alloc(block).unwrap();
    assert!(
        attempt.is_none(),
        "still not enough capacity because we won't allocate the zero index"
    );

    // A single extra byte doesn't help either.
    free_list.add_capacity(1);
    let attempt = free_list.alloc(block).unwrap();
    assert!(
        attempt.is_none(),
        "still not enough capacity because allocations are multiples of the alignment"
    );

    // Topping up to a full second alignment unit finally makes room for
    // exactly one block.
    free_list.add_capacity(ALIGN_USIZE - 1);
    let first = free_list
        .alloc(block)
        .unwrap()
        .expect("now we have enough capacity for one");
    assert!(
        free_list.alloc(block).unwrap().is_none(),
        "but not enough capacity for two"
    );

    free_list.add_capacity(ALIGN_USIZE);
    let second = free_list
        .alloc(block)
        .unwrap()
        .expect("now we have enough capacity for two");

    // Freeing both blocks should coalesce capacity that arrived via
    // separate `add_capacity` calls.
    free_list.dealloc(first, block);
    free_list.dealloc(second, block);
    assert_eq!(
        free_list.free_block_index_to_len.len(),
        1,
        "`dealloc` should merge blocks from different `add_capacity` calls together"
    );

    // And fresh capacity should merge into the trailing free block rather
    // than creating a second one.
    free_list.add_capacity(ALIGN_USIZE);
    assert_eq!(
        free_list.free_block_index_to_len.len(),
        1,
        "`add_capacity` should eagerly merge new capacity into the last block \
         in the free list, when possible"
    );
}
837
921
}
0 commit comments