diff --git a/libc/src/__support/block.h b/libc/src/__support/block.h
index e63801301ac75..9ca3f11530c4b 100644
--- a/libc/src/__support/block.h
+++ b/libc/src/__support/block.h
@@ -68,13 +68,11 @@ using cpp::optional;
 /// The blocks store their offsets to the previous and next blocks. The latter
 /// is also the block's size.
 ///
-/// The `ALIGNMENT` constant provided by the derived block is typically the
-/// minimum value of `alignof(OffsetType)`. Blocks will always be aligned to a
-/// `ALIGNMENT` boundary. Block sizes will always be rounded up to a multiple of
-/// `ALIGNMENT`.
+/// Blocks will always be aligned to a `ALIGNMENT` boundary. Block sizes will
+/// always be rounded up to a multiple of `ALIGNMENT`.
 ///
-/// As an example, the diagram below represents two contiguous
-/// `Block`s. The indices indicate byte offsets:
+/// As an example, the diagram below represents two contiguous `Block`s. The
+/// indices indicate byte offsets:
 ///
 /// @code{.unparsed}
 /// Block 1:
@@ -117,17 +115,6 @@ using cpp::optional;
 ///
 /// The next offset of a block matches the previous offset of its next block.
 /// The first block in a list is denoted by having a previous offset of `0`.
-///
-/// @tparam OffsetType Unsigned integral type used to encode offsets. Larger
-///                    types can address more memory, but consume greater
-///                    overhead.
-/// @tparam kAlign     Sets the overall alignment for blocks. Minimum is
-///                    `alignof(OffsetType)`, but the default is max_align_t,
-///                    since the usable space will then already be
-///                    aligned to max_align_t if the size of OffsetType is no
-///                    less than half of max_align_t. Larger values cause
-///                    greater overhead.
-template <typename OffsetType = uintptr_t, size_t kAlign = alignof(max_align_t)>
 class Block {
   // Masks for the contents of the next_ field.
   static constexpr size_t PREV_FREE_MASK = 1 << 0;
@@ -135,12 +122,8 @@ class Block {
   static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);
 
 public:
-  using offset_type = OffsetType;
-  static_assert(cpp::is_unsigned_v<offset_type>,
-                "offset type must be unsigned");
-  static constexpr size_t ALIGNMENT =
-      cpp::max(cpp::max(kAlign, alignof(offset_type)), size_t{4});
-  static constexpr size_t BLOCK_OVERHEAD = align_up(sizeof(Block), ALIGNMENT);
+  static constexpr size_t ALIGNMENT = cpp::max(alignof(max_align_t), size_t{4});
+  static const size_t BLOCK_OVERHEAD;
 
   // No copy or move.
   Block(const Block &other) = delete;
@@ -157,26 +140,26 @@ class Block {
   ///
   /// @warning This method does not do any checking; passing a random
   /// pointer will return a non-null pointer.
-  static Block *from_usable_space(void *usable_space) {
+  LIBC_INLINE static Block *from_usable_space(void *usable_space) {
     auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
     return reinterpret_cast<Block *>(bytes - BLOCK_OVERHEAD);
   }
-  static const Block *from_usable_space(const void *usable_space) {
+  LIBC_INLINE static const Block *from_usable_space(const void *usable_space) {
     const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
     return reinterpret_cast<const Block *>(bytes - BLOCK_OVERHEAD);
   }
 
   /// @returns The total size of the block in bytes, including the header.
-  size_t outer_size() const { return next_ & SIZE_MASK; }
+  LIBC_INLINE size_t outer_size() const { return next_ & SIZE_MASK; }
 
-  static size_t outer_size(size_t inner_size) {
+  LIBC_INLINE static size_t outer_size(size_t inner_size) {
     // The usable region includes the prev_ field of the next block.
     return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
   }
 
   /// @returns The number of usable bytes inside the block were it to be
   /// allocated.
-  size_t inner_size() const {
+  LIBC_INLINE size_t inner_size() const {
     if (!next())
       return 0;
     return inner_size(outer_size());
@@ -184,13 +167,13 @@ class Block {
 
   /// @returns The number of usable bytes inside a block with the given outer
   /// size were it to be allocated.
-  static size_t inner_size(size_t outer_size) {
+  LIBC_INLINE static size_t inner_size(size_t outer_size) {
     // The usable region includes the prev_ field of the next block.
     return inner_size_free(outer_size) + sizeof(prev_);
   }
 
   /// @returns The number of usable bytes inside the block if it remains free.
-  size_t inner_size_free() const {
+  LIBC_INLINE size_t inner_size_free() const {
     if (!next())
       return 0;
     return inner_size_free(outer_size());
@@ -198,20 +181,20 @@ class Block {
 
   /// @returns The number of usable bytes inside a block with the given outer
   /// size if it remains free.
-  static size_t inner_size_free(size_t outer_size) {
+  LIBC_INLINE static size_t inner_size_free(size_t outer_size) {
     return outer_size - BLOCK_OVERHEAD;
   }
 
   /// @returns A pointer to the usable space inside this block.
-  cpp::byte *usable_space() {
+  LIBC_INLINE cpp::byte *usable_space() {
     return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
   }
-  const cpp::byte *usable_space() const {
+  LIBC_INLINE const cpp::byte *usable_space() const {
     return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
   }
 
   // @returns The region of memory the block manages, including the header.
-  ByteSpan region() {
+  LIBC_INLINE ByteSpan region() {
     return {reinterpret_cast<cpp::byte *>(this), outer_size()};
   }
 
@@ -229,42 +212,53 @@ class Block {
 
   /// @returns The block immediately after this one, or a null pointer if this
   /// is the last block.
-  Block *next() const;
+  LIBC_INLINE Block *next() const {
+    if (next_ & LAST_MASK)
+      return nullptr;
+    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
+                                     outer_size());
+  }
 
   /// @returns The free block immediately before this one, otherwise nullptr.
-  Block *prev_free() const;
+  LIBC_INLINE Block *prev_free() const {
+    if (!(next_ & PREV_FREE_MASK))
+      return nullptr;
+    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
+  }
 
   /// @returns Whether the block is unavailable for allocation.
-  bool used() const { return !next() || !next()->prev_free(); }
+  LIBC_INLINE bool used() const { return !next() || !next()->prev_free(); }
 
   /// Marks this block as in use.
-  void mark_used() {
+  LIBC_INLINE void mark_used() {
     LIBC_ASSERT(next() && "last block is always considered used");
     next()->next_ &= ~PREV_FREE_MASK;
   }
 
   /// Marks this block as free.
-  void mark_free() {
+  LIBC_INLINE void mark_free() {
     LIBC_ASSERT(next() && "last block is always considered used");
     next()->next_ |= PREV_FREE_MASK;
     // The next block's prev_ field becomes alive, as it is no longer part of
     // this block's used space.
-    *new (&next()->prev_) offset_type = outer_size();
+    *new (&next()->prev_) size_t = outer_size();
   }
 
   /// Marks this block as the last one in the chain. Makes next() return
   /// nullptr.
-  void mark_last() { next_ |= LAST_MASK; }
+  LIBC_INLINE void mark_last() { next_ |= LAST_MASK; }
 
-  constexpr Block(size_t outer_size);
+  LIBC_INLINE constexpr Block(size_t outer_size) : next_(outer_size) {
+    LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
+  }
 
-  bool is_usable_space_aligned(size_t alignment) const {
+  LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const {
     return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
   }
 
   /// @returns The new inner size of this block that would give the usable
   /// space of the next block the given alignment.
-  size_t padding_for_alignment(size_t alignment) const {
+  LIBC_INLINE size_t padding_for_alignment(size_t alignment) const {
     if (is_usable_space_aligned(alignment))
       return 0;
 
@@ -322,7 +316,9 @@ class Block {
 private:
   /// Construct a block to represent a span of bytes. Overwrites only enough
   /// memory for the block header; the rest of the span is left alone.
-  static Block *as_block(ByteSpan bytes);
+  LIBC_INLINE static Block *as_block(ByteSpan bytes) {
+    return ::new (bytes.data()) Block(bytes.size());
+  }
 
   /// Like `split`, but assumes the caller has already checked to parameters to
   /// ensure the split will succeed.
@@ -332,11 +328,11 @@ class Block {
   /// block. This field is only alive when the previous block is free;
   /// otherwise, its memory is reused as part of the previous block's usable
   /// space.
-  offset_type prev_ = 0;
+  size_t prev_ = 0;
 
   /// Offset from this block to the next block. Valid even if this is the last
   /// block, since it equals the size of the block.
-  offset_type next_ = 0;
+  size_t next_ = 0;
 
   /// Information about the current state of the block is stored in the two low
   /// order bits of the next_ value. These are guaranteed free by a minimum
@@ -347,9 +343,10 @@ class Block {
   /// previous block is free.
   /// * If the `last` flag is set, the block is the sentinel last block. It is
   /// summarily considered used and has no next block.
-} __attribute__((packed, aligned(cpp::max(kAlign, size_t{4}))));
+} __attribute__((packed, aligned(cpp::max(alignof(max_align_t), size_t{4}))));
 
-// Public template method implementations.
+inline constexpr size_t Block::BLOCK_OVERHEAD =
+    align_up(sizeof(Block), ALIGNMENT);
 
 LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
   if (bytes.data() == nullptr)
@@ -367,9 +364,8 @@ LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
                  aligned_end - aligned_start);
 }
 
-template <typename OffsetType, size_t kAlign>
-optional<Block<OffsetType, kAlign> *>
-Block<OffsetType, kAlign>::init(ByteSpan region) {
+LIBC_INLINE
+optional<Block *> Block::init(ByteSpan region) {
   optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
   if (!result)
     return {};
@@ -379,7 +375,7 @@ Block<OffsetType, kAlign>::init(ByteSpan region) {
   if (region.size() < 2 * BLOCK_OVERHEAD)
     return {};
 
-  if (cpp::numeric_limits<OffsetType>::max() < region.size())
+  if (cpp::numeric_limits<size_t>::max() < region.size())
    return {};
 
   Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
@@ -389,9 +385,8 @@ Block<OffsetType, kAlign>::init(ByteSpan region) {
   return block;
 }
 
-template <typename OffsetType, size_t kAlign>
-bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
-                                             size_t size) const {
+LIBC_INLINE
+bool Block::can_allocate(size_t alignment, size_t size) const {
   if (inner_size() < size)
     return false;
   if (is_usable_space_aligned(alignment))
@@ -406,10 +401,8 @@ bool Block<OffsetType, kAlign>::can_allocate(size_t alignment,
   return size <= aligned_inner_size;
 }
 
-template <typename OffsetType, size_t kAlign>
-typename Block<OffsetType, kAlign>::BlockInfo
-Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
-                                    size_t size) {
+LIBC_INLINE
+Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
   LIBC_ASSERT(
       block->can_allocate(alignment, size) &&
       "Calls to this function for a given alignment and size should only be "
@@ -447,9 +440,8 @@ Block<OffsetType, kAlign>::allocate(Block *block, size_t alignment,
   return info;
 }
 
-template <typename OffsetType, size_t kAlign>
-optional<Block<OffsetType, kAlign> *>
-Block<OffsetType, kAlign>::split(size_t new_inner_size) {
+LIBC_INLINE
+optional<Block *> Block::split(size_t new_inner_size) {
   if (used())
     return {};
   // The prev_ field of the next block is always available, so there is a
@@ -469,9 +461,8 @@ Block<OffsetType, kAlign>::split(size_t new_inner_size) {
   return split_impl(new_inner_size);
 }
 
-template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *
-Block<OffsetType, kAlign>::split_impl(size_t new_inner_size) {
+LIBC_INLINE
+Block *Block::split_impl(size_t new_inner_size) {
   size_t outer_size1 = outer_size(new_inner_size);
   LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
   ByteSpan new_region = region().subspan(outer_size1);
@@ -484,8 +475,8 @@ Block<OffsetType, kAlign>::split_impl(size_t new_inner_size) {
   return new_block;
 }
 
-template <typename OffsetType, size_t kAlign>
-bool Block<OffsetType, kAlign>::merge_next() {
+LIBC_INLINE
+bool Block::merge_next() {
   if (used() || next()->used())
     return false;
   size_t new_size = outer_size() + next()->outer_size();
@@ -495,34 +486,6 @@ bool Block<OffsetType, kAlign>::merge_next() {
   return true;
 }
 
-template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::next() const {
-  if (next_ & LAST_MASK)
-    return nullptr;
-  return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
-                                   outer_size());
-}
-
-template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::prev_free() const {
-  if (!(next_ & PREV_FREE_MASK))
-    return nullptr;
-  return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
-}
-
-// Private template method implementations.
-
-template <typename OffsetType, size_t kAlign>
-constexpr Block<OffsetType, kAlign>::Block(size_t outer_size)
-    : next_(outer_size) {
-  LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
-}
-
-template <typename OffsetType, size_t kAlign>
-Block<OffsetType, kAlign> *Block<OffsetType, kAlign>::as_block(ByteSpan bytes) {
-  return ::new (bytes.data()) Block(bytes.size());
-}
-
 } // namespace LIBC_NAMESPACE_DECL
 
 #endif // LLVM_LIBC_SRC___SUPPORT_BLOCK_H
diff --git a/libc/src/__support/freelist.cpp b/libc/src/__support/freelist.cpp
index d3dd44895130c..bfb90ae1c4db4 100644
--- a/libc/src/__support/freelist.cpp
+++ b/libc/src/__support/freelist.cpp
@@ -12,7 +12,7 @@ namespace LIBC_NAMESPACE_DECL {
 
 void FreeList::push(Node *node) {
   if (begin_) {
-    LIBC_ASSERT(Block<>::from_usable_space(node)->outer_size() ==
+    LIBC_ASSERT(Block::from_usable_space(node)->outer_size() ==
                     begin_->block()->outer_size() &&
                 "freelist entries must have the same size");
     // Since the list is circular, insert the node immediately before begin_.
diff --git a/libc/src/__support/freelist.h b/libc/src/__support/freelist.h
index eaeaeb013eeae..c51f14fe57ae7 100644
--- a/libc/src/__support/freelist.h
+++ b/libc/src/__support/freelist.h
@@ -26,12 +26,12 @@ class FreeList {
   class Node {
   public:
     /// @returns The block containing this node.
-    LIBC_INLINE const Block<> *block() const {
-      return Block<>::from_usable_space(this);
+    LIBC_INLINE const Block *block() const {
+      return Block::from_usable_space(this);
     }
 
     /// @returns The block containing this node.
-    LIBC_INLINE Block<> *block() { return Block<>::from_usable_space(this); }
+    LIBC_INLINE Block *block() { return Block::from_usable_space(this); }
 
     /// @returns The inner size of blocks in the list containing this node.
     LIBC_INLINE size_t size() const { return block()->inner_size(); }
@@ -58,11 +58,11 @@ class FreeList {
   LIBC_INLINE Node *begin() { return begin_; }
 
   /// @returns The first block in the list.
-  LIBC_INLINE Block<> *front() { return begin_->block(); }
+  LIBC_INLINE Block *front() { return begin_->block(); }
 
   /// Push a block to the back of the list.
   /// The block must be large enough to contain a node.
-  LIBC_INLINE void push(Block<> *block) {
+  LIBC_INLINE void push(Block *block) {
     LIBC_ASSERT(!block->used() &&
                 "only free blocks can be placed on free lists");
     LIBC_ASSERT(block->inner_size_free() >= sizeof(FreeList) &&
diff --git a/libc/src/__support/freelist_heap.h b/libc/src/__support/freelist_heap.h
index cfcf72fc4c985..8fa36257cb91a 100644
--- a/libc/src/__support/freelist_heap.h
+++ b/libc/src/__support/freelist_heap.h
@@ -53,7 +53,7 @@ class FreeListHeap {
 
   void *allocate_impl(size_t alignment, size_t size);
 
-  span<cpp::byte> block_to_span(Block<> *block) {
+  span<cpp::byte> block_to_span(Block *block) {
     return span<cpp::byte>(block->usable_space(), block->inner_size());
   }
 
@@ -75,8 +75,8 @@ template <size_t BUFF_SIZE> class FreeListHeapBuffer : public FreeListHeap {
 
 LIBC_INLINE void FreeListHeap::init() {
   LIBC_ASSERT(!is_initialized && "duplicate initialization");
-  auto result = Block<>::init(region());
-  Block<> *block = *result;
+  auto result = Block::init(region());
+  Block *block = *result;
   free_store.set_range({0, cpp::bit_ceil(block->inner_size())});
   free_store.insert(block);
   is_initialized = true;
@@ -93,17 +93,17 @@ LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) {
 
   // TODO: usable_space should always be aligned to max_align_t.
   if (alignment > alignof(max_align_t) ||
-      (Block<>::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) {
+      (Block::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) {
     // TODO: This bound isn't precisely calculated yet. It assumes one extra
-    // Block<>::ALIGNMENT to accomodate the possibility for padding block
+    // Block::ALIGNMENT to accomodate the possibility for padding block
     // overhead. (alignment - 1) ensures that there is an aligned point
     // somewhere in usable_space, but this isn't tight either, since
     // usable_space is also already somewhat aligned.
-    if (add_overflow(size, (alignment - 1) + Block<>::ALIGNMENT, request_size))
+    if (add_overflow(size, (alignment - 1) + Block::ALIGNMENT, request_size))
      return nullptr;
   }
 
-  Block<> *block = free_store.remove_best_fit(request_size);
+  Block *block = free_store.remove_best_fit(request_size);
   if (!block)
     return nullptr;
 
@@ -111,7 +111,7 @@ LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) {
               "block should always be large enough to allocate at the correct "
               "alignment");
 
-  auto block_info = Block<>::allocate(block, alignment, size);
+  auto block_info = Block::allocate(block, alignment, size);
   if (block_info.next)
     free_store.insert(block_info.next);
   if (block_info.prev)
@@ -143,14 +143,14 @@ LIBC_INLINE void FreeListHeap::free(void *ptr) {
 
   LIBC_ASSERT(is_valid_ptr(bytes) && "Invalid pointer");
 
-  Block<> *block = Block<>::from_usable_space(bytes);
+  Block *block = Block::from_usable_space(bytes);
   LIBC_ASSERT(block->next() && "sentinel last block cannot be freed");
   LIBC_ASSERT(block->used() && "double free");
   block->mark_free();
 
   // Can we combine with the left or right blocks?
-  Block<> *prev_free = block->prev_free();
-  Block<> *next = block->next();
+  Block *prev_free = block->prev_free();
+  Block *next = block->next();
 
   if (prev_free != nullptr) {
     // Remove from free store and merge.
@@ -183,7 +183,7 @@ LIBC_INLINE void *FreeListHeap::realloc(void *ptr, size_t size) {
   if (!is_valid_ptr(bytes))
     return nullptr;
 
-  Block<> *block = Block<>::from_usable_space(bytes);
+  Block *block = Block::from_usable_space(bytes);
   if (!block->used())
     return nullptr;
   size_t old_size = block->inner_size();
diff --git a/libc/src/__support/freestore.h b/libc/src/__support/freestore.h
index f04b561f5d91d..97197dda4b546 100644
--- a/libc/src/__support/freestore.h
+++ b/libc/src/__support/freestore.h
@@ -29,40 +29,40 @@ class FreeStore {
 
   /// Insert a free block. If the block is too small to be tracked, nothing
   /// happens.
-  void insert(Block<> *block);
+  void insert(Block *block);
 
   /// Remove a free block. If the block is too small to be tracked, nothing
   /// happens.
-  void remove(Block<> *block);
+  void remove(Block *block);
 
   /// Remove a best-fit free block that can contain the given size when
   /// allocated. Returns nullptr if there is no such block.
-  Block<> *remove_best_fit(size_t size);
+  Block *remove_best_fit(size_t size);
 
 private:
   static constexpr size_t ALIGNMENT = alignof(max_align_t);
   static constexpr size_t MIN_OUTER_SIZE =
-      align_up(Block<>::BLOCK_OVERHEAD + sizeof(FreeList::Node), ALIGNMENT);
+      align_up(Block::BLOCK_OVERHEAD + sizeof(FreeList::Node), ALIGNMENT);
   static constexpr size_t MIN_LARGE_OUTER_SIZE =
-      align_up(Block<>::BLOCK_OVERHEAD + sizeof(FreeTrie::Node), ALIGNMENT);
+      align_up(Block::BLOCK_OVERHEAD + sizeof(FreeTrie::Node), ALIGNMENT);
   static constexpr size_t NUM_SMALL_SIZES =
       (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / ALIGNMENT;
 
-  LIBC_INLINE static bool too_small(Block<> *block) {
+  LIBC_INLINE static bool too_small(Block *block) {
     return block->outer_size() < MIN_OUTER_SIZE;
   }
-  LIBC_INLINE static bool is_small(Block<> *block) {
+  LIBC_INLINE static bool is_small(Block *block) {
     return block->outer_size() < MIN_LARGE_OUTER_SIZE;
   }
 
-  FreeList &small_list(Block<> *block);
+  FreeList &small_list(Block *block);
   FreeList *find_best_small_fit(size_t size);
 
   cpp::array<FreeList, NUM_SMALL_SIZES> small_lists;
   FreeTrie large_trie;
 };
 
-LIBC_INLINE void FreeStore::insert(Block<> *block) {
+LIBC_INLINE void FreeStore::insert(Block *block) {
   if (too_small(block))
     return;
   if (is_small(block))
@@ -71,7 +71,7 @@ LIBC_INLINE void FreeStore::insert(Block<> *block) {
     large_trie.push(block);
 }
 
-LIBC_INLINE void FreeStore::remove(Block<> *block) {
+LIBC_INLINE void FreeStore::remove(Block *block) {
   if (too_small(block))
     return;
   if (is_small(block)) {
@@ -83,21 +83,21 @@ LIBC_INLINE void FreeStore::remove(Block<> *block) {
   }
 }
 
-LIBC_INLINE Block<> *FreeStore::remove_best_fit(size_t size) {
+LIBC_INLINE Block *FreeStore::remove_best_fit(size_t size) {
   if (FreeList *list = find_best_small_fit(size)) {
-    Block<> *block = list->front();
+    Block *block = list->front();
     list->pop();
     return block;
   }
   if (FreeTrie::Node *best_fit = large_trie.find_best_fit(size)) {
-    Block<> *block = best_fit->block();
+    Block *block = best_fit->block();
     large_trie.remove(best_fit);
     return block;
   }
   return nullptr;
 }
 
-LIBC_INLINE FreeList &FreeStore::small_list(Block<> *block) {
+LIBC_INLINE FreeList &FreeStore::small_list(Block *block) {
   LIBC_ASSERT(is_small(block) && "only legal for small blocks");
   return small_lists[(block->outer_size() - MIN_OUTER_SIZE) / ALIGNMENT];
 }
diff --git a/libc/src/__support/freetrie.h b/libc/src/__support/freetrie.h
index ff1912ee94f88..42363c2c9e2f4 100644
--- a/libc/src/__support/freetrie.h
+++ b/libc/src/__support/freetrie.h
@@ -96,7 +96,7 @@ class FreeTrie {
 
   LIBC_INLINE bool empty() const { return !root; }
   /// Push a block to the trie.
-  void push(Block<> *block);
+  void push(Block *block);
 
   /// Remove a node from this trie node's free list.
   void remove(Node *node);
@@ -117,7 +117,7 @@ class FreeTrie {
   SizeRange range;
 };
 
-LIBC_INLINE void FreeTrie::push(Block<> *block) {
+LIBC_INLINE void FreeTrie::push(Block *block) {
   LIBC_ASSERT(block->inner_size_free() >= sizeof(Node) &&
               "block too small to accomodate free trie node");
   size_t size = block->inner_size();
diff --git a/libc/test/src/__support/block_test.cpp b/libc/test/src/__support/block_test.cpp
index 4d23861155502..5e437db51b609 100644
--- a/libc/test/src/__support/block_test.cpp
+++ b/libc/test/src/__support/block_test.cpp
@@ -14,96 +14,60 @@
 #include "src/string/memcpy.h"
 #include "test/UnitTest/Test.h"
 
-// Block types.
-using LargeOffsetBlock = LIBC_NAMESPACE::Block<uint64_t>;
-using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
-
-// For each of the block types above, we'd like to run the same tests since
-// they should work independently of the parameter sizes. Rather than re-writing
-// the same test for each case, let's instead create a custom test framework for
-// each test case that invokes the actual testing function for each block type.
-//
-// It's organized this way because the ASSERT/EXPECT macros only work within a
-// `Test` class due to those macros expanding to `test` methods.
-#define TEST_FOR_EACH_BLOCK_TYPE(TestCase)                                     \
-  class LlvmLibcBlockTest##TestCase : public LIBC_NAMESPACE::testing::Test {  \
-  public:                                                                      \
-    template <typename BlockType> void RunTest();                             \
-  };                                                                           \
-  TEST_F(LlvmLibcBlockTest##TestCase, TestCase) {                              \
-    RunTest<LargeOffsetBlock>();                                               \
-    RunTest<SmallOffsetBlock>();                                               \
-  }                                                                            \
-  template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()
-
+using LIBC_NAMESPACE::Block;
 using LIBC_NAMESPACE::cpp::array;
 using LIBC_NAMESPACE::cpp::bit_ceil;
 using LIBC_NAMESPACE::cpp::byte;
 using LIBC_NAMESPACE::cpp::span;
 
-TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
+TEST(LlvmLibcBlockTest, CanCreateSingleAlignedBlock) {
   constexpr size_t kN = 1024;
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
 
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
-  BlockType *last = block->next();
-  ASSERT_NE(last, static_cast<BlockType *>(nullptr));
-  constexpr size_t last_outer_size = BlockType::BLOCK_OVERHEAD;
+  Block *last = block->next();
+  ASSERT_NE(last, static_cast<Block *>(nullptr));
+  constexpr size_t last_outer_size = Block::BLOCK_OVERHEAD;
   EXPECT_EQ(last->outer_size(), last_outer_size);
   EXPECT_EQ(last->prev_free(), block);
   EXPECT_TRUE(last->used());
 
   EXPECT_EQ(block->outer_size(), kN - last_outer_size);
-  constexpr size_t last_prev_field_size =
-      sizeof(typename BlockType::offset_type);
-  EXPECT_EQ(block->inner_size(), kN - last_outer_size -
-                                     BlockType::BLOCK_OVERHEAD +
+  constexpr size_t last_prev_field_size = sizeof(size_t);
+  EXPECT_EQ(block->inner_size(), kN - last_outer_size - Block::BLOCK_OVERHEAD +
                                      last_prev_field_size);
-  EXPECT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
   EXPECT_FALSE(block->used());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanCreateUnalignedSingleBlock) {
+TEST(LlvmLibcBlockTest, CanCreateUnalignedSingleBlock) {
   constexpr size_t kN = 1024;
 
   // Force alignment, so we can un-force it below
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
   span<byte> aligned(bytes);
 
-  auto result = BlockType::init(aligned.subspan(1));
+  auto result = Block::init(aligned.subspan(1));
   EXPECT_TRUE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotCreateTooSmallBlock) {
+TEST(LlvmLibcBlockTest, CannotCreateTooSmallBlock) {
   array<byte, 2> bytes;
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   EXPECT_FALSE(result.has_value());
 }
 
-// This test specifically checks that we cannot allocate a block with a size
-// larger than what can be held by the offset type, we don't need to test with
-// multiple block types for this particular check, so we use the normal TEST
-// macro and not the custom framework.
-TEST(LlvmLibcBlockTest, CannotCreateTooLargeBlock) {
-  using BlockType = LIBC_NAMESPACE::Block<uint8_t>;
+TEST(LlvmLibcBlockTest, CanSplitBlock) {
   constexpr size_t kN = 1024;
-
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
-  EXPECT_FALSE(result.has_value());
-}
-
-TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
-  constexpr size_t kN = 1024;
-  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t prev_field_size = sizeof(size_t);
   // Give the split position a large alignment.
   constexpr size_t kSplitN = 512 + prev_field_size;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
   auto *block1 = *result;
   size_t orig_size = block1->outer_size();
@@ -114,7 +78,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
 
   EXPECT_EQ(block1->inner_size(), kSplitN);
   EXPECT_EQ(block1->outer_size(),
-            kSplitN - prev_field_size + BlockType::BLOCK_OVERHEAD);
+            kSplitN - prev_field_size + Block::BLOCK_OVERHEAD);
 
   EXPECT_EQ(block2->outer_size(), orig_size - block1->outer_size());
   EXPECT_FALSE(block2->used());
@@ -123,26 +87,26 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
   EXPECT_EQ(block2->prev_free(), block1);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
+TEST(LlvmLibcBlockTest, CanSplitBlockUnaligned) {
   constexpr size_t kN = 1024;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
   size_t orig_size = block1->outer_size();
 
   constexpr size_t kSplitN = 513;
-  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t prev_field_size = sizeof(size_t);
   uintptr_t split_addr =
       reinterpret_cast<uintptr_t>(block1) + (kSplitN - prev_field_size);
   // Round split_addr up to a multiple of the alignment.
-  split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
+  split_addr += alignof(Block) - (split_addr % alignof(Block));
   uintptr_t split_len = split_addr - (uintptr_t)&bytes + prev_field_size;
 
   result = block1->split(kSplitN);
   ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
+  Block *block2 = *result;
 
   EXPECT_EQ(block1->inner_size(), split_len);
 
@@ -153,7 +117,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
   EXPECT_EQ(block2->prev_free(), block1);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
+TEST(LlvmLibcBlockTest, CanSplitMidBlock) {
   // split once, then split the original block again to ensure that the
   // pointers get rewired properly.
   // I.e.
@@ -167,18 +131,18 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
   constexpr size_t kSplit1 = 512;
   constexpr size_t kSplit2 = 256;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
 
   result = block1->split(kSplit1);
   ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
+  Block *block2 = *result;
 
   result = block1->split(kSplit2);
   ASSERT_TRUE(result.has_value());
-  BlockType *block3 = *result;
+  Block *block3 = *result;
 
   EXPECT_EQ(block1->next(), block3);
   EXPECT_EQ(block3->prev_free(), block1);
@@ -186,97 +150,97 @@ TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
   EXPECT_EQ(block2->prev_free(), block3);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
+TEST(LlvmLibcBlockTest, CannotSplitTooSmallBlock) {
   constexpr size_t kN = 64;
   constexpr size_t kSplitN = kN + 1;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   result = block->split(kSplitN);
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
+TEST(LlvmLibcBlockTest, CannotSplitBlockWithoutHeaderSpace) {
   constexpr size_t kN = 1024;
-  constexpr size_t kSplitN = kN - 2 * BlockType::BLOCK_OVERHEAD - 1;
+  constexpr size_t kSplitN = kN - 2 * Block::BLOCK_OVERHEAD - 1;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   result = block->split(kSplitN);
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotMakeBlockLargerInSplit) {
+TEST(LlvmLibcBlockTest, CannotMakeBlockLargerInSplit) {
   // Ensure that we can't ask for more space than the block actually has...
   constexpr size_t kN = 1024;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   result = block->split(block->inner_size() + 1);
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
+TEST(LlvmLibcBlockTest, CannotMakeSecondBlockLargerInSplit) {
   // Ensure that the second block in split is at least of the size of header.
   constexpr size_t kN = 1024;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
-  result = block->split(block->inner_size() - BlockType::BLOCK_OVERHEAD + 1);
+  result = block->split(block->inner_size() - Block::BLOCK_OVERHEAD + 1);
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeFirstBlock) {
+TEST(LlvmLibcBlockTest, CanMakeMinimalSizeFirstBlock) {
   // This block does support splitting with minimal payload size.
   constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t minimal_size = sizeof(size_t);
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   result = block->split(minimal_size);
   ASSERT_TRUE(result.has_value());
   EXPECT_EQ(block->inner_size(), minimal_size);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeSecondBlock) {
+TEST(LlvmLibcBlockTest, CanMakeMinimalSizeSecondBlock) {
   // Likewise, the split block can be minimal-width.
   constexpr size_t kN = 1024;
-  constexpr size_t minimal_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t minimal_size = sizeof(size_t);
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
 
-  result = block1->split(block1->inner_size() - BlockType::BLOCK_OVERHEAD);
+  result = block1->split(block1->inner_size() - Block::BLOCK_OVERHEAD);
   ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
+  Block *block2 = *result;
 
   EXPECT_EQ(block2->inner_size(), minimal_size);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
+TEST(LlvmLibcBlockTest, CanMarkBlockUsed) {
   constexpr size_t kN = 1024;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
   size_t orig_size = block->outer_size();
 
   block->mark_used();
@@ -287,33 +251,33 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
   EXPECT_FALSE(block->used());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotSplitUsedBlock) {
+TEST(LlvmLibcBlockTest, CannotSplitUsedBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   block->mark_used();
   result = block->split(kSplitN);
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
+TEST(LlvmLibcBlockTest, CanMergeWithNextBlock) {
   // Do the three way merge from "CanSplitMidBlock", and let's
   // merge block 3 and 2
   constexpr size_t kN = 1024;
   // Give the split positions large alignments.
-  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
+  constexpr size_t prev_field_size = sizeof(size_t);
   constexpr size_t kSplit1 = 512 + prev_field_size;
   constexpr size_t kSplit2 = 256 + prev_field_size;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
   size_t orig_size = block1->outer_size();
 
   result = block1->split(kSplit1);
@@ -321,7 +285,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
 
   result = block1->split(kSplit2);
   ASSERT_TRUE(result.has_value());
-  BlockType *block3 = *result;
+  Block *block3 = *result;
 
   EXPECT_TRUE(block3->merge_next());
 
@@ -331,31 +295,31 @@ TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
   EXPECT_EQ(block3->outer_size(), orig_size - block1->outer_size());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
+TEST(LlvmLibcBlockTest, CannotMergeWithFirstOrLastBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
 
   // Do a split, just to check that the checks on next/prev are different...
   result = block1->split(kSplitN);
   ASSERT_TRUE(result.has_value());
-  BlockType *block2 = *result;
+  Block *block2 = *result;
 
   EXPECT_FALSE(block2->merge_next());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
+TEST(LlvmLibcBlockTest, CannotMergeUsedBlock) {
   constexpr size_t kN = 1024;
   constexpr size_t kSplitN = 512;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes;
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   // Do a split, just to check that the checks on next/prev are different...
   result = block->split(kSplitN);
@@ -365,177 +329,176 @@ TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
   EXPECT_FALSE(block->merge_next());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
+TEST(LlvmLibcBlockTest, CanGetBlockFromUsableSpace) {
   constexpr size_t kN = 1024;
 
   array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block1 = *result;
+  Block *block1 = *result;
 
   void *ptr = block1->usable_space();
-  BlockType *block2 = BlockType::from_usable_space(ptr);
+  Block *block2 = Block::from_usable_space(ptr);
   EXPECT_EQ(block1, block2);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
+TEST(LlvmLibcBlockTest, CanGetConstBlockFromUsableSpace) {
   constexpr size_t kN = 1024;
 
   array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  const BlockType *block1 = *result;
+  const Block *block1 = *result;
 
   const void *ptr = block1->usable_space();
-  const BlockType *block2 = BlockType::from_usable_space(ptr);
+  const Block *block2 = Block::from_usable_space(ptr);
   EXPECT_EQ(block1, block2);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
-  constexpr size_t kN = 1024 + BlockType::BLOCK_OVERHEAD;
+TEST(LlvmLibcBlockTest, CanAllocate) {
+  constexpr size_t kN = 1024 + Block::BLOCK_OVERHEAD;
 
   // Ensure we can allocate everything up to the block size within this block.
-  for (size_t i = 0; i < kN - 2 * BlockType::BLOCK_OVERHEAD; ++i) {
-    alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
-    auto result = BlockType::init(bytes);
+  for (size_t i = 0; i < kN - 2 * Block::BLOCK_OVERHEAD; ++i) {
+    alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+    auto result = Block::init(bytes);
     ASSERT_TRUE(result.has_value());
-    BlockType *block = *result;
+    Block *block = *result;
 
     constexpr size_t ALIGN = 1; // Effectively ignores alignment.
     EXPECT_TRUE(block->can_allocate(ALIGN, i));
 
     // For each can_allocate, we should be able to do a successful call to
     // allocate.
-    auto info = BlockType::allocate(block, ALIGN, i);
-    EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+    auto info = Block::allocate(block, ALIGN, i);
+    EXPECT_NE(info.block, static_cast<Block *>(nullptr));
   }
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   // Given a block of size N (assuming it's also a power of two), we should be
   // able to allocate a block within it that's aligned to N/2. This is
   // because regardless of where the buffer is located, we can always find a
   // starting location within it that meets this alignment.
   EXPECT_TRUE(block->can_allocate(block->outer_size() / 2, 1));
-  auto info = BlockType::allocate(block, block->outer_size() / 2, 1);
-  EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
+  auto info = Block::allocate(block, block->outer_size() / 2, 1);
+  EXPECT_NE(info.block, static_cast<Block *>(nullptr));
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
+TEST(LlvmLibcBlockTest, AllocateAlreadyAligned) {
   constexpr size_t kN = 1024;
 
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  alignas(Block::ALIGNMENT) array<byte, kN> bytes{};
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   // This should result in no new blocks.
-  constexpr size_t kAlignment = BlockType::ALIGNMENT;
-  constexpr size_t prev_field_size = sizeof(typename BlockType::offset_type);
-  constexpr size_t kExpectedSize = BlockType::ALIGNMENT + prev_field_size;
+  constexpr size_t kAlignment = Block::ALIGNMENT;
+  constexpr size_t prev_field_size = sizeof(size_t);
+  constexpr size_t kExpectedSize = Block::ALIGNMENT + prev_field_size;
   EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));
 
   auto [aligned_block, prev, next] =
-      BlockType::allocate(block, BlockType::ALIGNMENT, kExpectedSize);
+      Block::allocate(block, Block::ALIGNMENT, kExpectedSize);
 
   // Since this is already aligned, there should be no previous block.
-  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(prev, static_cast<Block *>(nullptr));
 
   // Ensure we the block is aligned and the size we expect.
-  EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
-  EXPECT_TRUE(aligned_block->is_usable_space_aligned(BlockType::ALIGNMENT));
+  EXPECT_NE(aligned_block, static_cast<Block *>(nullptr));
+  EXPECT_TRUE(aligned_block->is_usable_space_aligned(Block::ALIGNMENT));
   EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);
 
   // Check the next block.
-  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
   EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
+            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
+TEST(LlvmLibcBlockTest, AllocateNeedsAlignment) {
   constexpr size_t kN = 1024;
 
   alignas(kN) array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
+  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
+  ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
   ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
 
   constexpr size_t kSize = 10;
   EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
 
-  auto [aligned_block, prev, next] =
-      BlockType::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
 
   // Check the previous block was created appropriately. Since this block is the
   // first block, a new one should be made before this.
-  EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
+  EXPECT_NE(prev, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->prev_free(), prev);
   EXPECT_EQ(prev->next(), aligned_block);
   EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
                                     reinterpret_cast<uintptr_t>(prev));
 
   // Ensure we the block is aligned and the size we expect.
-  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));
 
   // Check the next block.
-  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
   EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
-            bytes.data() + bytes.size() - BlockType::BLOCK_OVERHEAD);
+            bytes.data() + bytes.size() - Block::BLOCK_OVERHEAD);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
+TEST(LlvmLibcBlockTest, PreviousBlockMergedIfNotFirst) {
   constexpr size_t kN = 1024;
 
   alignas(kN) array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
+  Block *block = *result;
 
   // Split the block roughly halfway and work on the second half.
   auto result2 = block->split(kN / 2);
   ASSERT_TRUE(result2.has_value());
-  BlockType *newblock = *result2;
+  Block *newblock = *result2;
   ASSERT_EQ(newblock->prev_free(), block);
   size_t old_prev_size = block->outer_size();
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
   ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));
 
   // Ensure we can allocate in the new block.
-  constexpr size_t kSize = BlockType::ALIGNMENT;
+  constexpr size_t kSize = Block::ALIGNMENT;
   EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));
 
   auto [aligned_block, prev, next] =
-      BlockType::allocate(newblock, kAlignment, kSize);
+      Block::allocate(newblock, kAlignment, kSize);
 
   // Now there should be no new previous block. Instead, the padding we did
   // create should be merged into the original previous block.
-  EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
+  EXPECT_EQ(prev, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->prev_free(), block);
   EXPECT_EQ(block->next(), aligned_block);
   EXPECT_GT(block->outer_size(), old_prev_size);
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
+TEST(LlvmLibcBlockTest, CanRemergeBlockAllocations) {
   // Finally to ensure we made the split blocks correctly via allocate. We
   // should be able to reconstruct the original block from the blocklets.
   //
@@ -543,31 +506,30 @@ TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
   constexpr size_t kN = 1024;
 
   alignas(kN) array<byte, kN> bytes{};
-  auto result = BlockType::init(bytes);
+  auto result = Block::init(bytes);
   ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
-  BlockType *last = block->next();
+  Block *block = *result;
+  Block *last = block->next();
 
   // Ensure first the usable_data is only aligned to the block alignment.
-  ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
-  ASSERT_EQ(block->prev_free(), static_cast<BlockType *>(nullptr));
+  ASSERT_EQ(block->usable_space(), bytes.data() + Block::BLOCK_OVERHEAD);
+  ASSERT_EQ(block->prev_free(), static_cast<Block *>(nullptr));
 
   // Now pick an alignment such that the usable space is not already aligned to
   // it. We want to explicitly test that the block will split into one before
   // it.
-  constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
+  constexpr size_t kAlignment = bit_ceil(Block::BLOCK_OVERHEAD) * 8;
   ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
 
-  constexpr size_t kSize = BlockType::ALIGNMENT;
+  constexpr size_t kSize = Block::ALIGNMENT;
   EXPECT_TRUE(block->can_allocate(kAlignment, kSize));
 
-  auto [aligned_block, prev, next] =
-      BlockType::allocate(block, kAlignment, kSize);
+  auto [aligned_block, prev, next] = Block::allocate(block, kAlignment, kSize);
 
   // Check we have the appropriate blocks.
-  ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
+  ASSERT_NE(prev, static_cast<Block *>(nullptr));
   ASSERT_EQ(aligned_block->prev_free(), prev);
-  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
+  EXPECT_NE(next, static_cast<Block *>(nullptr));
   EXPECT_EQ(aligned_block->next(), next);
   EXPECT_EQ(next->next(), last);
 
@@ -579,7 +541,7 @@ TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
 
   // We should have the original buffer.
   EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
-  EXPECT_EQ(prev->outer_size(), bytes.size() - BlockType::BLOCK_OVERHEAD);
+  EXPECT_EQ(prev->outer_size(), bytes.size() - Block::BLOCK_OVERHEAD);
   EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(),
-            &*bytes.end() - BlockType::BLOCK_OVERHEAD);
+            &*bytes.end() - Block::BLOCK_OVERHEAD);
 }
diff --git a/libc/test/src/__support/freelist_heap_test.cpp b/libc/test/src/__support/freelist_heap_test.cpp
index 59ebf4e50974b..991c158825a88 100644
--- a/libc/test/src/__support/freelist_heap_test.cpp
+++ b/libc/test/src/__support/freelist_heap_test.cpp
@@ -42,7 +42,7 @@ using LIBC_NAMESPACE::cpp::span;
     void RunTest(FreeListHeap &allocator, [[maybe_unused]] size_t N);          \
   };                                                                           \
   TEST_F(LlvmLibcFreeListHeapTest##TestCase, TestCase) {                       \
-    alignas(Block<>) byte buf[BufferSize] = {byte(0)};                         \
+    alignas(Block) byte buf[BufferSize] = {byte(0)};                           \
     FreeListHeap allocator(buf);                                               \
     RunTest(allocator, BufferSize);                                            \
     RunTest(*freelist_heap, freelist_heap->region().size());                   \
@@ -95,13 +95,13 @@ TEST_FOR_EACH_ALLOCATOR(ReturnsNullWhenAllocationTooLarge, 2048) {
 // is used for other test cases and we don't explicitly free them.
 TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
   constexpr size_t N = 2048;
-  alignas(Block<>) byte buf[N] = {byte(0)};
+  alignas(Block) byte buf[N] = {byte(0)};
 
   FreeListHeap allocator(buf);
 
   // Use aligned_allocate so we don't need to worry about ensuring the `buf`
   // being aligned to max_align_t.
-  EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block<>::BLOCK_OVERHEAD),
+  EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block::BLOCK_OVERHEAD),
             static_cast<void *>(nullptr));
   EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
 }
@@ -241,16 +241,16 @@ TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
 
 // This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to
 // explicitly ensure that the buffer can still return aligned allocations even
-// if the underlying buffer is at most aligned to the Block<> alignment. This
+// if the underlying buffer is at most aligned to the Block alignment. This
 // is so we can check that we can still get aligned allocations even if the
 // underlying buffer is not aligned to the alignments we request.
 TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockAligned) {
   constexpr size_t BUFFER_SIZE = 4096;
-  constexpr size_t BUFFER_ALIGNMENT = alignof(Block<>) * 2;
+  constexpr size_t BUFFER_ALIGNMENT = alignof(Block) * 2;
   alignas(BUFFER_ALIGNMENT) byte buf[BUFFER_SIZE] = {byte(0)};
 
   // Ensure the underlying buffer is at most aligned to the block type.
-  FreeListHeap allocator(span<byte>(buf).subspan(alignof(Block<>)));
+  FreeListHeap allocator(span<byte>(buf).subspan(alignof(Block)));
 
   constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
   constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
diff --git a/libc/test/src/__support/freelist_malloc_test.cpp b/libc/test/src/__support/freelist_malloc_test.cpp
index 583e40d947822..793e2498304fb 100644
--- a/libc/test/src/__support/freelist_malloc_test.cpp
+++ b/libc/test/src/__support/freelist_malloc_test.cpp
@@ -24,12 +24,12 @@ TEST(LlvmLibcFreeListMalloc, Malloc) {
   constexpr size_t kCallocSize = 64;
 
   void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
-  auto *block = Block<>::from_usable_space(ptr1);
+  auto *block = Block::from_usable_space(ptr1);
   EXPECT_GE(block->inner_size(), kAllocSize);
 
   LIBC_NAMESPACE::free(ptr1);
-  ASSERT_NE(block->next(), static_cast<Block<> *>(nullptr));
-  ASSERT_EQ(block->next()->next(), static_cast<Block<> *>(nullptr));
+  ASSERT_NE(block->next(), static_cast<Block *>(nullptr));
+  ASSERT_EQ(block->next()->next(), static_cast<Block *>(nullptr));
   size_t heap_size = block->inner_size();
 
   void *ptr2 = LIBC_NAMESPACE::calloc(kCallocNum, kCallocSize);
@@ -46,7 +46,7 @@ TEST(LlvmLibcFreeListMalloc, Malloc) {
   void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
   EXPECT_NE(ptr3, static_cast<void *>(nullptr));
   EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
-  auto *aligned_block = reinterpret_cast<Block<> *>(ptr3);
+  auto *aligned_block = reinterpret_cast<Block *>(ptr3);
   EXPECT_GE(aligned_block->inner_size(), kAllocSize);
 
   LIBC_NAMESPACE::free(ptr3);
diff --git a/libc/test/src/__support/freelist_test.cpp b/libc/test/src/__support/freelist_test.cpp
index 1f310761bee18..bd5ecec45d921 100644
--- a/libc/test/src/__support/freelist_test.cpp
+++ b/libc/test/src/__support/freelist_test.cpp
@@ -18,13 +18,13 @@ using LIBC_NAMESPACE::cpp::optional;
 
 TEST(LlvmLibcFreeList, FreeList) {
   byte mem[1024];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *block1 = *maybeBlock;
+  Block *block1 = *maybeBlock;
 
   maybeBlock = block1->split(128);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *block2 = *maybeBlock;
+  Block *block2 = *maybeBlock;
 
   maybeBlock = block2->split(128);
   ASSERT_TRUE(maybeBlock.has_value());
diff --git a/libc/test/src/__support/freestore_test.cpp b/libc/test/src/__support/freestore_test.cpp
index 84f2acfab878a..7960d32c8bbf0 100644
--- a/libc/test/src/__support/freestore_test.cpp
+++ b/libc/test/src/__support/freestore_test.cpp
@@ -21,12 +21,12 @@ using LIBC_NAMESPACE::cpp::optional;
 
 // Inserting or removing blocks too small to be tracked does nothing.
 TEST(LlvmLibcFreeStore, TooSmall) {
   byte mem[1024];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *too_small = *maybeBlock;
-  maybeBlock = too_small->split(sizeof(Block<>::offset_type));
+  Block *too_small = *maybeBlock;
+  maybeBlock = too_small->split(sizeof(size_t));
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *remainder = *maybeBlock;
+  Block *remainder = *maybeBlock;
 
   FreeStore store;
   store.set_range({0, 4096});
@@ -39,24 +39,22 @@ TEST(LlvmLibcFreeStore, RemoveBestFit) {
   byte mem[1024];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
 
-  Block<> *smallest = *maybeBlock;
-  maybeBlock =
-      smallest->split(sizeof(FreeList::Node) + sizeof(Block<>::offset_type));
+  Block *smallest = *maybeBlock;
+  maybeBlock = smallest->split(sizeof(FreeList::Node) + sizeof(size_t));
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *largest_small = *maybeBlock;
-  maybeBlock =
-      largest_small->split(sizeof(FreeTrie::Node) +
-                           sizeof(Block<>::offset_type) - alignof(max_align_t));
+  Block *largest_small = *maybeBlock;
+  maybeBlock = largest_small->split(sizeof(FreeTrie::Node) + sizeof(size_t) -
+                                    alignof(max_align_t));
   ASSERT_TRUE(maybeBlock.has_value());
   if (largest_small->inner_size() == smallest->inner_size())
     largest_small = smallest;
   ASSERT_GE(largest_small->inner_size(), smallest->inner_size());
 
-  Block<> *remainder = *maybeBlock;
+  Block *remainder = *maybeBlock;
 
   FreeStore store;
   store.set_range({0, 4096});
@@ -74,8 +72,7 @@ TEST(LlvmLibcFreeStore, RemoveBestFit) {
   store.insert(largest_small);
 
   // Search small list for best fit.
-  Block<> *next_smallest =
-      largest_small == smallest ? remainder : largest_small;
+  Block *next_smallest = largest_small == smallest ? remainder : largest_small;
   ASSERT_EQ(store.remove_best_fit(smallest->inner_size() + 1), next_smallest);
   store.insert(next_smallest);
 
@@ -85,15 +82,14 @@ TEST(LlvmLibcFreeStore, Remove) {
   byte mem[1024];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
 
-  Block<> *small = *maybeBlock;
-  maybeBlock =
-      small->split(sizeof(FreeList::Node) + sizeof(Block<>::offset_type));
+  Block *small = *maybeBlock;
+  maybeBlock = small->split(sizeof(FreeList::Node) + sizeof(size_t));
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *remainder = *maybeBlock;
+  Block *remainder = *maybeBlock;
 
   FreeStore store;
   store.set_range({0, 4096});
@@ -102,8 +98,8 @@ TEST(LlvmLibcFreeStore, Remove) {
 
   store.remove(remainder);
   ASSERT_EQ(store.remove_best_fit(remainder->inner_size()),
-            static_cast<Block<> *>(nullptr));
+            static_cast<Block *>(nullptr));
   store.remove(small);
   ASSERT_EQ(store.remove_best_fit(small->inner_size()),
-            static_cast<Block<> *>(nullptr));
+            static_cast<Block *>(nullptr));
 }
diff --git a/libc/test/src/__support/freetrie_test.cpp b/libc/test/src/__support/freetrie_test.cpp
index 1e3caceb7293b..5663a01687294 100644
--- a/libc/test/src/__support/freetrie_test.cpp
+++ b/libc/test/src/__support/freetrie_test.cpp
@@ -21,9 +21,9 @@ TEST(LlvmLibcFreeTrie, FindBestFitRoot) {
   EXPECT_EQ(trie.find_best_fit(123), static_cast<FreeTrie::Node *>(nullptr));
 
   byte mem[1024];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *block = *maybeBlock;
+  Block *block = *maybeBlock;
 
   trie.push(block);
   FreeTrie::Node *root = trie.find_best_fit(0);
@@ -37,12 +37,12 @@ TEST(LlvmLibcFreeTrie, FindBestFitRoot) {
 
 TEST(LlvmLibcFreeTrie, FindBestFitLower) {
   byte mem[4096];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *lower = *maybeBlock;
+  Block *lower = *maybeBlock;
   maybeBlock = lower->split(512);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *root = *maybeBlock;
+  Block *root = *maybeBlock;
 
   FreeTrie trie({0, 4096});
   trie.push(root);
@@ -53,12 +53,12 @@ TEST(LlvmLibcFreeTrie, FindBestFitLower) {
 
 TEST(LlvmLibcFreeTrie, FindBestFitUpper) {
   byte mem[4096];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *root = *maybeBlock;
+  Block *root = *maybeBlock;
   maybeBlock = root->split(512);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *upper = *maybeBlock;
+  Block *upper = *maybeBlock;
 
   FreeTrie trie({0, 4096});
   trie.push(root);
@@ -71,15 +71,15 @@ TEST(LlvmLibcFreeTrie, FindBestFitUpper) {
 
 TEST(LlvmLibcFreeTrie, FindBestFitLowerAndUpper) {
   byte mem[4096];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *root = *maybeBlock;
+  Block *root = *maybeBlock;
   maybeBlock = root->split(1024);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *lower = *maybeBlock;
+  Block *lower = *maybeBlock;
   maybeBlock = lower->split(128);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *upper = *maybeBlock;
+  Block *upper = *maybeBlock;
 
   FreeTrie trie({0, 4096});
   trie.push(root);
@@ -95,16 +95,16 @@ TEST(LlvmLibcFreeTrie, FindBestFitLowerAndUpper) {
 
 TEST(LlvmLibcFreeTrie, Remove) {
   byte mem[4096];
-  optional<Block<> *> maybeBlock = Block<>::init(mem);
+  optional<Block *> maybeBlock = Block::init(mem);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *small1 = *maybeBlock;
+  Block *small1 = *maybeBlock;
   maybeBlock = small1->split(512);
   ASSERT_TRUE(maybeBlock.has_value());
-  Block<> *small2 = *maybeBlock;
+  Block *small2 = *maybeBlock;
   maybeBlock = small2->split(512);
   ASSERT_TRUE(maybeBlock.has_value());
   ASSERT_EQ(small1->inner_size(), small2->inner_size());
-  Block<> *large = *maybeBlock;
+  Block *large = *maybeBlock;
 
   // Removing the root empties the trie.
   FreeTrie trie({0, 4096});
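
To make the shape of the change concrete, here is a minimal usage sketch of the de-templated API, written against the post-patch signatures shown above (Block::init, split, mark_used/mark_free, from_usable_space). The function and buffer names are hypothetical, not part of the patch:

#include "src/__support/block.h"

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::cpp::byte;

// Hypothetical driver exercising the now non-template Block.
void demo() {
  // Only Block::ALIGNMENT matters now; all offsets are size_t.
  alignas(Block::ALIGNMENT) static byte buffer[1024];

  // init() carves the region into one free block plus a sentinel last block.
  auto init_result = Block::init(buffer);
  if (!init_result)
    return;
  Block *block = *init_result;

  // Split off 256 usable bytes; the tail becomes a second free block.
  if (auto split_result = block->split(256)) {
    Block *rest = *split_result;
    (void)rest;
  }

  // Claim the block, then recover it from its payload pointer.
  block->mark_used();
  void *payload = block->usable_space();
  Block *same = Block::from_usable_space(payload);
  same->mark_free();
}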
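One detail worth noting: BLOCK_OVERHEAD can no longer be initialized in-class, because its initializer `align_up(sizeof(Block), ALIGNMENT)` requires Block to be a complete type. The patch therefore declares the member in-class and defines it after the class as a C++17 inline constexpr variable. A self-contained sketch of that pattern, using a hypothetical Header type rather than the real Block:

#include <cstddef>

struct Header {
  // Declaration only: the initializer below needs sizeof(Header), which is
  // unavailable until the class is complete.
  static const std::size_t PADDED_SIZE;
  std::size_t prev = 0;
  std::size_t next = 0;
};

// C++17 inline variable: one definition across all translation units, and
// the value remains usable in constant expressions.
inline constexpr std::size_t Header::PADDED_SIZE =
    (sizeof(Header) + alignof(std::max_align_t) - 1) &
    ~(alignof(std::max_align_t) - 1);

static_assert(Header::PADDED_SIZE % alignof(std::max_align_t) == 0,
              "padded size must be a multiple of max_align_t");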