diff --git a/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test.h b/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test.h
index f8b658c937889..27c0e591a2099 100644
--- a/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test.h
+++ b/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test.h
@@ -12,6 +12,7 @@
 #include <zxtest/zxtest.h>
 using Test = ::zxtest::Test;
 #define TEST_SKIP(message) ZXTEST_SKIP(message)
+#define TEST_HAS_FAILURE true
 #else
 #include "gtest/gtest.h"
 using Test = ::testing::Test;
@@ -19,6 +20,7 @@ using Test = ::testing::Test;
   do {                                                                         \
     GTEST_SKIP() << message;                                                   \
   } while (0)
+#define TEST_HAS_FAILURE Test::HasFailure()
 #endif
 
 // If EXPECT_DEATH isn't defined, make it a no-op.
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index e1471dfdf6807..518c1f2f0a6e6 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -13,15 +13,19 @@
 #include "allocator_config_wrapper.h"
 #include "secondary.h"
 
+#include <stdio.h>
+
 #include <algorithm>
 #include <condition_variable>
 #include <memory>
 #include <mutex>
 #include <random>
-#include <stdio.h>
 #include <thread>
 #include <vector>
 
+// Get this once to use throughout the tests.
+const scudo::uptr PageSize = scudo::getPageSizeCached();
+
 template <typename Config> static scudo::Options getOptionsForConfig() {
   if (!Config::getMaySupportMemoryTagging() ||
       !scudo::archSupportsMemoryTagging() ||
@@ -32,58 +36,37 @@ template <typename Config> static scudo::Options getOptionsForConfig() {
   return AO.load();
 }
 
-template <typename Config> static void testBasic(void) {
-  using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
-  scudo::Options Options =
-      getOptionsForConfig<scudo::SecondaryConfig<Config>>();
+template <typename Config> struct AllocatorInfoType {
+  std::unique_ptr<scudo::MapAllocator<scudo::SecondaryConfig<Config>>>
+      Allocator;
+  scudo::GlobalStats GlobalStats;
+  scudo::Options Options;
+
+  AllocatorInfoType(scudo::s32 ReleaseToOsInterval) {
+    using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
+    Options = getOptionsForConfig<scudo::SecondaryConfig<Config>>();
+    GlobalStats.init();
+    Allocator.reset(new SecondaryT);
+    Allocator->init(&GlobalStats, ReleaseToOsInterval);
+  }
 
-  scudo::GlobalStats S;
-  S.init();
-  std::unique_ptr<SecondaryT> L(new SecondaryT);
-  L->init(&S);
-  const scudo::uptr Size = 1U << 16;
-  void *P = L->allocate(Options, Size);
-  EXPECT_NE(P, nullptr);
-  memset(P, 'A', Size);
-  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
-  L->deallocate(Options, P);
+  AllocatorInfoType() : AllocatorInfoType(-1) {}
 
-  // If the Secondary can't cache that pointer, it will be unmapped.
-  if (!L->canCache(Size)) {
-    EXPECT_DEATH(
-        {
-          // Repeat few time to avoid missing crash if it's mmaped by unrelated
-          // code.
-          for (int i = 0; i < 10; ++i) {
-            P = L->allocate(Options, Size);
-            L->deallocate(Options, P);
-            memset(P, 'A', Size);
-          }
-        },
-        "");
-  }
+  ~AllocatorInfoType() {
+    if (Allocator == nullptr) {
+      return;
+    }
 
-  const scudo::uptr Align = 1U << 16;
-  P = L->allocate(Options, Size + Align, Align);
-  EXPECT_NE(P, nullptr);
-  void *AlignedP = reinterpret_cast<void *>(
-      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
-  memset(AlignedP, 'A', Size);
-  L->deallocate(Options, P);
+    if (TEST_HAS_FAILURE) {
+      // Print all of the stats if the test fails.
+      scudo::ScopedString Str;
+      Allocator->getStats(&Str);
+      Str.output();
+    }
 
-  std::vector<void *> V;
-  for (scudo::uptr I = 0; I < 32U; I++)
-    V.push_back(L->allocate(Options, Size));
-  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
-  while (!V.empty()) {
-    L->deallocate(Options, V.back());
-    V.pop_back();
+    Allocator->unmapTestOnly();
   }
-  scudo::ScopedString Str;
-  L->getStats(&Str);
-  Str.output();
-  L->unmapTestOnly();
-}
+};
 
 struct TestNoCacheConfig {
   static const bool MaySupportMemoryTagging = false;
@@ -117,30 +100,62 @@ struct TestCacheConfig {
   };
 };
 
+template <typename Config> static void testBasic() {
+  using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
+  AllocatorInfoType<Config> Info;
+
+  const scudo::uptr Size = 1U << 16;
+  void *P = Info.Allocator->allocate(Info.Options, Size);
+  EXPECT_NE(P, nullptr);
+  memset(P, 'A', Size);
+  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
+  Info.Allocator->deallocate(Info.Options, P);
+
+  // If the Secondary can't cache that pointer, it will be unmapped.
+  if (!Info.Allocator->canCache(Size)) {
+    EXPECT_DEATH(
+        {
+          // Repeat few time to avoid missing crash if it's mmaped by unrelated
+          // code.
+          for (int i = 0; i < 10; ++i) {
+            P = Info.Allocator->allocate(Info.Options, Size);
+            Info.Allocator->deallocate(Info.Options, P);
+            memset(P, 'A', Size);
+          }
+        },
+        "");
+  }
+
+  const scudo::uptr Align = 1U << 16;
+  P = Info.Allocator->allocate(Info.Options, Size + Align, Align);
+  EXPECT_NE(P, nullptr);
+  void *AlignedP = reinterpret_cast<void *>(
+      scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
+  memset(AlignedP, 'A', Size);
+  Info.Allocator->deallocate(Info.Options, P);
+
+  std::vector<void *> V;
+  for (scudo::uptr I = 0; I < 32U; I++)
+    V.push_back(Info.Allocator->allocate(Info.Options, Size));
+  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
+  while (!V.empty()) {
+    Info.Allocator->deallocate(Info.Options, V.back());
+    V.pop_back();
+  }
+}
+
 TEST(ScudoSecondaryTest, Basic) {
   testBasic<TestNoCacheConfig>();
   testBasic<scudo::DefaultConfig>();
   testBasic<TestCacheConfig>();
 }
 
-struct ScudoSecondaryAllocatorTest : public Test {
-  using LargeAllocator =
-      scudo::MapAllocator<scudo::SecondaryConfig<scudo::DefaultConfig>>;
-
-  void SetUp() override { Allocator->init(nullptr); }
-
-  void TearDown() override { Allocator->unmapTestOnly(); }
-
-  std::unique_ptr<LargeAllocator> Allocator =
-      std::make_unique<LargeAllocator>();
-  scudo::Options Options =
-      getOptionsForConfig<scudo::SecondaryConfig<scudo::DefaultConfig>>();
-};
-
 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
-TEST_F(ScudoSecondaryAllocatorTest, Combinations) {
+TEST(ScudoSecondaryTest, AllocatorCombinations) {
+  AllocatorInfoType<scudo::DefaultConfig> Info;
+
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
   for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
@@ -154,80 +169,71 @@
             static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
         const scudo::uptr Size = HeaderSize + UserSize +
                                  (Align > MinAlign ? Align - HeaderSize : 0);
-        void *P = Allocator->allocate(Options, Size, Align);
+        void *P = Info.Allocator->allocate(Info.Options, Size, Align);
         EXPECT_NE(P, nullptr);
         void *AlignedP = reinterpret_cast<void *>(
             scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
         memset(AlignedP, 0xff, UserSize);
-        Allocator->deallocate(Options, P);
+        Info.Allocator->deallocate(Info.Options, P);
       }
     }
   }
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
 }
 
-TEST_F(ScudoSecondaryAllocatorTest, Iterate) {
+TEST(ScudoSecondaryTest, AllocatorIterate) {
+  AllocatorInfoType<scudo::DefaultConfig> Info;
+
   std::vector<void *> V;
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
   for (scudo::uptr I = 0; I < 32U; I++)
-    V.push_back(Allocator->allocate(
-        Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
+    V.push_back(Info.Allocator->allocate(
+        Info.Options,
+        (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
   auto Lambda = [&V](scudo::uptr Block) {
     EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
              V.end());
   };
-  Allocator->disable();
-  Allocator->iterateOverBlocks(Lambda);
-  Allocator->enable();
+  Info.Allocator->disable();
+  Info.Allocator->iterateOverBlocks(Lambda);
+  Info.Allocator->enable();
   while (!V.empty()) {
-    Allocator->deallocate(Options, V.back());
+    Info.Allocator->deallocate(Info.Options, V.back());
    V.pop_back();
   }
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
 }
 
-struct ScudoSecondaryAllocatorWithReleaseTest
-    : public ScudoSecondaryAllocatorTest {
-  void SetUp() override { Allocator->init(nullptr, /*ReleaseToOsInterval=*/0); }
-
-  void performAllocations() {
-    std::vector<void *> V;
-    const scudo::uptr PageSize = scudo::getPageSizeCached();
-    {
-      std::unique_lock<std::mutex> Lock(Mutex);
-      while (!Ready)
-        Cv.wait(Lock);
-    }
-    for (scudo::uptr I = 0; I < 128U; I++) {
-      // Deallocate 75% of the blocks.
-      const bool Deallocate = (std::rand() & 3) != 0;
-      void *P = Allocator->allocate(
-          Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
-      if (Deallocate)
-        Allocator->deallocate(Options, P);
-      else
-        V.push_back(P);
-    }
-    while (!V.empty()) {
-      Allocator->deallocate(Options, V.back());
-      V.pop_back();
-    }
-  }
+TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
+  AllocatorInfoType<scudo::DefaultConfig> Info(/*ReleaseToOsInterval=*/0);
 
   std::mutex Mutex;
   std::condition_variable Cv;
   bool Ready = false;
-};
 
-TEST_F(ScudoSecondaryAllocatorWithReleaseTest, ThreadsRace) {
   std::thread Threads[16];
   for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
-    Threads[I] = std::thread(
-        &ScudoSecondaryAllocatorWithReleaseTest::performAllocations, this);
+    Threads[I] = std::thread([&Mutex, &Cv, &Ready, &Info]() {
+      std::vector<void *> V;
+      {
+        std::unique_lock<std::mutex> Lock(Mutex);
+        while (!Ready)
+          Cv.wait(Lock);
+      }
+      for (scudo::uptr I = 0; I < 128U; I++) {
+        // Deallocate 75% of the blocks.
+        const bool Deallocate = (std::rand() & 3) != 0;
+        void *P = Info.Allocator->allocate(
+            Info.Options,
+            (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
+        if (Deallocate)
+          Info.Allocator->deallocate(Info.Options, P);
+        else
+          V.push_back(P);
+      }
+      while (!V.empty()) {
+        Info.Allocator->deallocate(Info.Options, V.back());
+        V.pop_back();
+      }
+    });
+
   {
     std::unique_lock<std::mutex> Lock(Mutex);
     Ready = true;
@@ -235,36 +241,43 @@ TEST_F(ScudoSecondaryAllocatorWithReleaseTest, ThreadsRace)
   }
   for (auto &T : Threads)
     T.join();
-  scudo::ScopedString Str;
-  Allocator->getStats(&Str);
-  Str.output();
 }
 
-struct ScudoSecondaryAllocatorCacheTest : public Test {
-  static constexpr scudo::u32 UnmappedMarker = 0xDEADBEEF;
+// Value written to cache entries that are unmapped.
+static scudo::u32 UnmappedMarker = 0xDEADBEEF;
 
-  static void testUnmapCallback(scudo::MemMapT &MemMap) {
+template <typename Config> struct CacheInfoType {
+  static void addMarkerToMapCallback(scudo::MemMapT &MemMap) {
+    // When a cache entry is unmapped, don't actually unmap it. Instead,
+    // write a special marker to indicate the cache entry was released. The
+    // real unmap will happen in the destructor. It is assumed that all of
+    // these maps will be in the MemMaps vector.
     scudo::u32 *Ptr = reinterpret_cast<scudo::u32 *>(MemMap.getBase());
     *Ptr = UnmappedMarker;
   }
 
   using SecondaryConfig = scudo::SecondaryConfig<TestCacheConfig>;
   using CacheConfig = SecondaryConfig::CacheConfig;
-  using CacheT = scudo::MapAllocatorCache<CacheConfig, testUnmapCallback>;
-
+  using CacheT = scudo::MapAllocatorCache<CacheConfig, addMarkerToMapCallback>;
+  scudo::Options Options = getOptionsForConfig<SecondaryConfig>();
   std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();
-
-  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  std::vector<scudo::MemMapT> MemMaps;
   // The current test allocation size is set to the maximum
   // cache entry size
   static constexpr scudo::uptr TestAllocSize =
       CacheConfig::getDefaultMaxEntrySize();
 
-  scudo::Options Options = getOptionsForConfig<SecondaryConfig>();
+  CacheInfoType() { Cache->init(/*ReleaseToOsInterval=*/-1); }
 
-  void SetUp() override { Cache->init(/*ReleaseToOsInterval=*/-1); }
+  ~CacheInfoType() {
+    if (Cache == nullptr) {
+      return;
+    }
 
-  void TearDown() override { Cache->unmapTestOnly(); }
+    // Clean up MemMaps
+    for (auto &MemMap : MemMaps)
+      MemMap.unmap();
+  }
 
   scudo::MemMapT allocate(scudo::uptr Size) {
     scudo::uptr MapSize = scudo::roundUp(Size, PageSize);
@@ -278,8 +291,7 @@ struct ScudoSecondaryAllocatorCacheTest : public Test {
     return MemMap;
   }
 
-  void fillCacheWithSameSizeBlocks(std::vector<scudo::MemMapT> &MemMaps,
-                                   scudo::uptr NumEntries, scudo::uptr Size) {
+  void fillCacheWithSameSizeBlocks(scudo::uptr NumEntries, scudo::uptr Size) {
     for (scudo::uptr I = 0; I < NumEntries; I++) {
       MemMaps.emplace_back(allocate(Size));
       auto &MemMap = MemMaps[I];
@@ -289,58 +301,60 @@ struct ScudoSecondaryAllocatorCacheTest : public Test {
   }
 };
 
-TEST_F(ScudoSecondaryAllocatorCacheTest, EntryOrder) {
-  std::vector<scudo::MemMapT> MemMaps;
-  Cache->setOption(scudo::Option::MaxCacheEntriesCount,
-                   CacheConfig::getEntriesArraySize());
+TEST(ScudoSecondaryTest, AllocatorCacheEntryOrder) {
+  CacheInfoType<TestCacheConfig> Info;
+  using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;
+
+  Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount,
+                        CacheConfig::getEntriesArraySize());
 
-  fillCacheWithSameSizeBlocks(MemMaps, CacheConfig::getEntriesArraySize(),
-                              TestAllocSize);
+  Info.fillCacheWithSameSizeBlocks(CacheConfig::getEntriesArraySize(),
+                                   Info.TestAllocSize);
 
   // Retrieval order should be the inverse of insertion order
   for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
     scudo::uptr EntryHeaderPos;
-    scudo::CachedBlock Entry =
-        Cache->retrieve(0, TestAllocSize, PageSize, 0, EntryHeaderPos);
-    EXPECT_EQ(Entry.MemMap.getBase(), MemMaps[I - 1].getBase());
+    scudo::CachedBlock Entry = Info.Cache->retrieve(
+        0, Info.TestAllocSize, PageSize, 0, EntryHeaderPos);
+    EXPECT_EQ(Entry.MemMap.getBase(), Info.MemMaps[I - 1].getBase());
   }
-
-  // Clean up MemMaps
-  for (auto &MemMap : MemMaps)
-    MemMap.unmap();
 }
 
-TEST_F(ScudoSecondaryAllocatorCacheTest, PartialChunkHeuristicRetrievalTest) {
+TEST(ScudoSecondaryTest, AllocatorCachePartialChunkHeuristicRetrievalTest) {
+  CacheInfoType<TestCacheConfig> Info;
+
   const scudo::uptr FragmentedPages =
       1 + scudo::CachedBlock::MaxReleasedCachePages;
   scudo::uptr EntryHeaderPos;
   scudo::CachedBlock Entry;
-  scudo::MemMapT MemMap = allocate(PageSize + FragmentedPages * PageSize);
-  Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
-               MemMap.getBase(), MemMap);
+  scudo::MemMapT MemMap = Info.allocate(PageSize + FragmentedPages * PageSize);
+  Info.Cache->store(Info.Options, MemMap.getBase(), MemMap.getCapacity(),
+                    MemMap.getBase(), MemMap);
 
   // FragmentedPages > MaxAllowedFragmentedPages so PageSize
   // cannot be retrieved from the cache
-  Entry = Cache->retrieve(/*MaxAllowedFragmentedPages=*/0, PageSize, PageSize,
-                          0, EntryHeaderPos);
+  Entry = Info.Cache->retrieve(/*MaxAllowedFragmentedPages=*/0, PageSize,
+                               PageSize, 0, EntryHeaderPos);
   EXPECT_FALSE(Entry.isValid());
 
   // FragmentedPages == MaxAllowedFragmentedPages so PageSize
   // can be retrieved from the cache
-  Entry =
-      Cache->retrieve(FragmentedPages, PageSize, PageSize, 0, EntryHeaderPos);
+  Entry = Info.Cache->retrieve(FragmentedPages, PageSize, PageSize, 0,
                               EntryHeaderPos);
   EXPECT_TRUE(Entry.isValid());
   MemMap.unmap();
 }
 
-TEST_F(ScudoSecondaryAllocatorCacheTest, MemoryLeakTest) {
-  std::vector<scudo::MemMapT> MemMaps;
+TEST(ScudoSecondaryTest, AllocatorCacheMemoryLeakTest) {
+  CacheInfoType<TestCacheConfig> Info;
+  using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;
+
   // Fill the cache above MaxEntriesCount to force an eviction
   // The first cache entry should be evicted (because it is the oldest)
   // due to the maximum number of entries being reached
-  fillCacheWithSameSizeBlocks(
-      MemMaps, CacheConfig::getDefaultMaxEntriesCount() + 1, TestAllocSize);
+  Info.fillCacheWithSameSizeBlocks(CacheConfig::getDefaultMaxEntriesCount() + 1,
                                   Info.TestAllocSize);
 
   std::vector<scudo::CachedBlock> RetrievedEntries;
 
@@ -348,37 +362,40 @@ TEST_F(ScudoSecondaryAllocatorCacheTest, MemoryLeakTest) {
   // inserted into the cache
   for (scudo::uptr I = CacheConfig::getDefaultMaxEntriesCount(); I > 0; I--) {
     scudo::uptr EntryHeaderPos;
-    RetrievedEntries.push_back(
-        Cache->retrieve(0, TestAllocSize, PageSize, 0, EntryHeaderPos));
-    EXPECT_EQ(MemMaps[I].getBase(), RetrievedEntries.back().MemMap.getBase());
+    RetrievedEntries.push_back(Info.Cache->retrieve(
+        0, Info.TestAllocSize, PageSize, 0, EntryHeaderPos));
+    EXPECT_EQ(Info.MemMaps[I].getBase(),
+              RetrievedEntries.back().MemMap.getBase());
   }
 
   // Evicted entry should be marked due to unmap callback
-  EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(MemMaps[0].getBase()),
+  EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[0].getBase()),
             UnmappedMarker);
-
-  // Clean up MemMaps
-  for (auto &MemMap : MemMaps)
-    MemMap.unmap();
 }
 
-TEST_F(ScudoSecondaryAllocatorCacheTest, Options) {
+TEST(ScudoSecondaryTest, AllocatorCacheOptions) {
+  CacheInfoType<TestCacheConfig> Info;
+
   // Attempt to set a maximum number of entries higher than the array size.
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));
+  EXPECT_TRUE(
+      Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));
 
   // Attempt to set an invalid (negative) number of entries
-  EXPECT_FALSE(Cache->setOption(scudo::Option::MaxCacheEntriesCount, -1));
+  EXPECT_FALSE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, -1));
 
   // Various valid combinations.
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
-  EXPECT_TRUE(Cache->canCache(1UL << 18));
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
-  EXPECT_FALSE(Cache->canCache(1UL << 18));
-  EXPECT_TRUE(Cache->canCache(1UL << 16));
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
-  EXPECT_FALSE(Cache->canCache(1UL << 16));
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
-  EXPECT_TRUE(Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
-  EXPECT_TRUE(Cache->canCache(1UL << 16));
+  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
+  EXPECT_TRUE(
+      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
+  EXPECT_TRUE(Info.Cache->canCache(1UL << 18));
+  EXPECT_TRUE(
+      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
+  EXPECT_FALSE(Info.Cache->canCache(1UL << 18));
+  EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
+  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
+  EXPECT_FALSE(Info.Cache->canCache(1UL << 16));
+  EXPECT_TRUE(Info.Cache->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
+  EXPECT_TRUE(
+      Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
+  EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
 }