diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index ce37b1cfaedcc..43893e9af3cf3 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -31,6 +31,9 @@
 #ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
 #define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
 #endif
+#ifndef SECONDARY_OPTIONAL
+#define SECONDARY_OPTIONAL(...)
+#endif
 #ifndef SECONDARY_CACHE_OPTIONAL
 #define SECONDARY_CACHE_OPTIONAL(...)
 #endif
@@ -109,6 +112,11 @@ PRIMARY_OPTIONAL_TYPE(CompactPtrT, uptr)
 // Defines the type of Secondary Cache to use.
 SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)

+// SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Add one guard page at the front and back for each allocation.
+SECONDARY_OPTIONAL(const bool, EnableGuardPages, true)
+
 // SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
 //
 // Defines the type of cache used by the Secondary. Some additional
@@ -122,6 +130,7 @@ SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
 SECONDARY_CACHE_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)

 #undef SECONDARY_CACHE_OPTIONAL
+#undef SECONDARY_OPTIONAL
 #undef SECONDARY_REQUIRED_TEMPLATE_TYPE
 #undef PRIMARY_OPTIONAL_TYPE
 #undef PRIMARY_OPTIONAL
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
index ea12a5b1447f6..ac639ee9dd943 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -95,6 +95,13 @@ template <typename AllocatorConfig> struct SecondaryConfig {
 #define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)                                 \
   template <typename T>                                                        \
   using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
+
+#define SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)                                \
+  OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME)                                 \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return NAME##State::getValue();                                            \
+  }
+
 #include "allocator_config.def"

   struct CacheConfig {
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 25b82356ca20f..f3f91c40e7a03 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -614,6 +614,12 @@ template <typename Config> class MapAllocator {
     return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
   }

+  static uptr getGuardPageSize() {
+    if (Config::getEnableGuardPages())
+      return getPageSizeCached();
+    return 0U;
+  }
+
   static constexpr uptr getHeadersSize() {
     return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
   }
@@ -763,11 +769,11 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   uptr RoundedSize =
       roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
-  if (Alignment > PageSize)
+  if (UNLIKELY(Alignment > PageSize))
     RoundedSize += Alignment - PageSize;

   ReservedMemoryT ReservedMemory;
-  const uptr MapSize = RoundedSize + 2 * PageSize;
+  const uptr MapSize = RoundedSize + 2 * getGuardPageSize();
   if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                       MAP_ALLOWNOMEM))) {
     return nullptr;
   }
@@ -777,7 +783,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                            ReservedMemory.getCapacity());
   uptr MapBase = MemMap.getBase();
-  uptr CommitBase = MapBase + PageSize;
+  uptr CommitBase = MapBase + getGuardPageSize();
   uptr MapEnd = MapBase + MapSize;

   // In the unlikely event of alignments larger than a page, adjust the amount
@@ -786,25 +792,30 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     // For alignments greater than or equal to a page, the user pointer (eg:
     // the pointer that is returned by the C or C++ allocation APIs) ends up
     // on a page boundary , and our headers will live in the preceding page.
-    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
+    CommitBase =
+        roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
     // We only trim the extra memory on 32-bit platforms: 64-bit platforms
     // are less constrained memory wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      MemMap.unmap(MapBase, NewMapBase - MapBase);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd =
-        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
-      MapEnd = NewMapEnd;
+    if (SCUDO_WORDSIZE == 32U) {
+      const uptr NewMapBase = CommitBase - getGuardPageSize();
+      DCHECK_GE(NewMapBase, MapBase);
+      if (NewMapBase != MapBase) {
+        MemMap.unmap(MapBase, NewMapBase - MapBase);
+        MapBase = NewMapBase;
+      }
+      // CommitBase is past the first guard page, but this computation needs
+      // to include a page where the header lives.
+      const uptr NewMapEnd =
+          CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
+      DCHECK_LE(NewMapEnd, MapEnd);
+      if (NewMapEnd != MapEnd) {
+        MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
+        MapEnd = NewMapEnd;
+      }
     }
   }

-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase;
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   if (!mapSecondary(Options, CommitBase, CommitSize, AllocPos, 0, MemMap)) {
@@ -812,6 +823,8 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     return nullptr;
   }
   const uptr HeaderPos = AllocPos - getHeadersSize();
+  // Make sure that the header is not in the guard page or before the base.
+  DCHECK_GE(HeaderPos, MapBase + getGuardPageSize());
   LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
       LargeBlock::addHeaderTag<Config>(HeaderPos));
   if (useMemoryTagging<Config>(Options))
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index ff98eb3397ee0..9d665ef88c402 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -54,16 +54,18 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                              scudo::uptr Alignment) {
   const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
   Size = scudo::roundUp(Size, MinAlignment);
-  if (Allocator->useMemoryTaggingTestOnly())
+  if (Allocator->useMemoryTaggingTestOnly()) {
     EXPECT_DEATH(
         {
           disableDebuggerdMaybe();
           reinterpret_cast<char *>(P)[-1] = 'A';
         },
         "");
+  }
   if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
           ? Allocator->useMemoryTaggingTestOnly()
-          : Alignment == MinAlignment) {
+          : Alignment == MinAlignment &&
+                AllocatorT::SecondaryT::getGuardPageSize() > 0) {
     EXPECT_DEATH(
         {
           disableDebuggerdMaybe();
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index 518c1f2f0a6e6..d8a7f6bd66ed2 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -80,6 +80,19 @@ struct TestNoCacheConfig {
   };
 };

+struct TestNoCacheNoGuardPageConfig {
+  static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+    static const bool EnableGuardPages = false;
+  };
+};
+
 struct TestCacheConfig {
   static const bool MaySupportMemoryTagging = false;
   template <typename> using TSDRegistryT = void;
@@ -100,6 +113,27 @@ struct TestCacheConfig {
   };
 };

+struct TestCacheNoGuardPageConfig {
+  static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
+
+  struct Secondary {
+    struct Cache {
+      static const scudo::u32 EntriesArraySize = 128U;
+      static const scudo::u32 QuarantineSize = 0U;
+      static const scudo::u32 DefaultMaxEntriesCount = 64U;
+      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
+      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    };
+
+    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
+    static const bool EnableGuardPages = false;
+  };
+};
+
 template <typename Config> static void testBasic() {
   using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
   AllocatorInfoType<Config> Info;
@@ -146,15 +180,17 @@ template <typename Config> static void testBasic() {
 TEST(ScudoSecondaryTest, Basic) {
   testBasic<TestNoCacheConfig>();
+  testBasic<TestNoCacheNoGuardPageConfig>();
   testBasic<TestCacheConfig>();
+  testBasic<TestCacheNoGuardPageConfig>();
   testBasic<scudo::DefaultConfig>();
 }

 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
-TEST(ScudoSecondaryTest, AllocatorCombinations) {
-  AllocatorInfoType<TestNoCacheConfig> Info;
+template <typename Config> void testAllocatorCombinations() {
+  AllocatorInfoType<Config> Info;

   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
@@ -180,8 +216,13 @@
   }
 }

-TEST(ScudoSecondaryTest, AllocatorIterate) {
-  AllocatorInfoType<TestNoCacheConfig> Info;
+TEST(ScudoSecondaryTest, AllocatorCombinations) {
+  testAllocatorCombinations<TestNoCacheConfig>();
+  testAllocatorCombinations<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config> void testAllocatorIterate() {
+  AllocatorInfoType<Config> Info;

   std::vector<void *> V;
   for (scudo::uptr I = 0; I < 32U; I++)
@@ -201,8 +242,13 @@
   }
 }

-TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
-  AllocatorInfoType<TestNoCacheConfig> Info(/*ReleaseToOsInterval=*/0);
+TEST(ScudoSecondaryTest, AllocatorIterate) {
+  testAllocatorIterate<TestNoCacheConfig>();
+  testAllocatorIterate<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config> void testAllocatorWithReleaseThreadsRace() {
+  AllocatorInfoType<Config> Info(/*ReleaseToOsInterval=*/0);

   std::mutex Mutex;
   std::condition_variable Cv;
@@ -243,6 +289,64 @@
     T.join();
 }

+TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
+  testAllocatorWithReleaseThreadsRace<TestNoCacheConfig>();
+  testAllocatorWithReleaseThreadsRace<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config>
+void testGetMappedSize(scudo::uptr Size, scudo::uptr *mapped,
+                       scudo::uptr *guard_page_size) {
+  AllocatorInfoType<Config> Info;
+
+  scudo::uptr Stats[scudo::StatCount] = {};
+  Info.GlobalStats.get(Stats);
+  *mapped = Stats[scudo::StatMapped];
+  Stats[scudo::StatMapped] = 0;
+
+  // Make sure the allocation is aligned to a page boundary so that the checks
+  // in the tests can avoid problems due to allocations having different
+  // alignments.
+  void *Ptr = Info.Allocator->allocate(Info.Options, Size, PageSize);
+  EXPECT_NE(Ptr, nullptr);
+
+  Info.GlobalStats.get(Stats);
+  EXPECT_GE(Stats[scudo::StatMapped], *mapped);
+  *mapped = Stats[scudo::StatMapped] - *mapped;
+
+  Info.Allocator->deallocate(Info.Options, Ptr);
+
+  *guard_page_size = Info.Allocator->getGuardPageSize();
+}
+
+TEST(ScudoSecondaryTest, VerifyGuardPageOption) {
+  static scudo::uptr AllocSize = 1000 * PageSize;
+
+  // Verify that a config with guard pages enabled:
+  //   - Non-zero sized guard page
+  //   - Mapped in at least the size of the allocation plus 2 * guard page size
+  scudo::uptr guard_mapped = 0;
+  scudo::uptr guard_page_size = 0;
+  testGetMappedSize<TestNoCacheConfig>(AllocSize, &guard_mapped,
+                                       &guard_page_size);
+  EXPECT_GT(guard_page_size, 0U);
+  EXPECT_GE(guard_mapped, AllocSize + 2 * guard_page_size);
+
+  // Verify that a config with guard pages disabled:
+  //   - Zero sized guard page
+  //   - The total mapped in is greater than the allocation size
+  scudo::uptr no_guard_mapped = 0;
+  scudo::uptr no_guard_page_size = 0;
+  testGetMappedSize<TestNoCacheNoGuardPageConfig>(AllocSize, &no_guard_mapped,
+                                                  &no_guard_page_size);
+  EXPECT_EQ(no_guard_page_size, 0U);
+  EXPECT_GE(no_guard_mapped, AllocSize);
+
+  // Verify that a guard page config mapped in at least twice the size of
+  // their guard page when compared to a no guard page config.
+  EXPECT_GE(guard_mapped, no_guard_mapped + guard_page_size * 2);
+}
+
 // Value written to cache entries that are unmapped.
 static scudo::u32 UnmappedMarker = 0xDEADBEEF;
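Usage note (not part of the patch): an embedding allocator config consumes the new knob like any other Secondary option. Below is a minimal sketch of a config that opts out of guard pages; MyNoGuardConfig is a hypothetical name, and its layout simply mirrors the TestNoCacheNoGuardPageConfig fixture added above.

```cpp
// A minimal sketch, not part of the patch: MyNoGuardConfig is a hypothetical
// embedder config mirroring the TestNoCacheNoGuardPageConfig test fixture.
// Omitting EnableGuardPages entirely keeps the default (true).
#include "allocator_config_wrapper.h"
#include "secondary.h"

struct MyNoGuardConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void; // Unused by the Secondary.
  template <typename> using PrimaryT = void;     // Unused by the Secondary.
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
    // The new option: skip the inaccessible page normally reserved on each
    // side of every secondary allocation.
    static const bool EnableGuardPages = false;
  };
};

// The SECONDARY_OPTIONAL plumbing in allocator_config_wrapper.h turns the
// field into a constexpr getter, so MapAllocator can call
// Config::getEnableGuardPages() and getGuardPageSize() then returns 0.
using NoGuardSecondary =
    scudo::MapAllocator<scudo::SecondaryConfig<MyNoGuardConfig>>;
```

Since the guard pages are reserved but never committed, disabling them mainly saves address space (MapSize shrinks by two pages per large allocation) and the 32-bit trimming work; the trade-off is losing the fault on a linear overflow or underflow past the mapping, which is why checkMemoryTaggingMaybe above now only expects that death when getGuardPageSize() > 0.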
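To make the size arithmetic in allocate() concrete, here is a small self-contained walkthrough of the layout computation under both settings, for the common path where Alignment <= PageSize. It is illustrative only: the header size and base address are made-up numbers, and the local roundUp mimics the scudo helper.

```cpp
// Worked example of the secondary mapping layout from allocate(), assuming a
// 4 KiB page. Numbers are illustrative, not taken from a real scudo build.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t PageSize = 4096;
  const uint64_t HeadersSize = 48;  // Stand-in for Chunk + LargeBlock headers.
  const uint64_t Size = 100 * 1024; // A 100 KiB request.
  const uint64_t Alignment = 16;

  auto roundUp = [](uint64_t X, uint64_t B) { return (X + B - 1) / B * B; };

  // 100 KiB + headers, rounded up to pages -> 104 KiB.
  const uint64_t RoundedSize =
      roundUp(roundUp(Size, Alignment) + HeadersSize, PageSize);

  // Guard pages enabled: one inaccessible page on each side of the mapping.
  uint64_t GuardPageSize = PageSize;
  uint64_t MapSize = RoundedSize + 2 * GuardPageSize; // 104 KiB + 8 KiB.
  uint64_t MapBase = 0x10000000;                      // Illustrative base.
  uint64_t CommitBase = MapBase + GuardPageSize;      // Skip the front guard.
  uint64_t CommitSize = MapBase + MapSize - GuardPageSize - CommitBase;
  assert(CommitSize == RoundedSize); // Usable region excludes both guards.

  // Guard pages disabled: getGuardPageSize() == 0, both pages disappear and
  // the committed region starts right at the mapping base.
  GuardPageSize = 0;
  MapSize = RoundedSize + 2 * GuardPageSize; // Just 104 KiB.
  CommitBase = MapBase + GuardPageSize;      // == MapBase.
  CommitSize = MapBase + MapSize - GuardPageSize - CommitBase;
  assert(CommitSize == RoundedSize);
  return 0;
}
```

This is exactly the relationship VerifyGuardPageOption checks through the mapped stats: the guard-page config maps at least 2 * guard_page_size more than the no-guard-page config for the same request.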