diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 748530820cd64..0aea7b8f2fb9a 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
 // Disable the quarantine code.
 BASE_OPTIONAL(const bool, QuarantineDisabled, false)
 
+// If set to true, malloc_usable_size returns the exact size of the allocation.
+// If set to false, it returns the total available size of the allocation.
+BASE_OPTIONAL(const bool, ExactUsableSize, true)
+
 // PRIMARY_REQUIRED_TYPE(NAME)
 //
 // SizeClassMap to use with the Primary.
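For context: the option defaults to `true`, which preserves the current `malloc_usable_size` behavior; a config opts into full usable sizes by overriding it. A minimal sketch, assuming a hypothetical config named `MyConfig` (only the `ExactUsableSize` member relates to this patch; the rest stands in for whatever the config already defines):

```cpp
// Hypothetical config opting into full usable sizes (illustrative only).
struct MyConfig {
  static const bool ExactUsableSize = false; // report the whole block tail
  // ... the usual required Primary/Secondary/TSDRegistryT members ...
};
// With ExactUsableSize = true (the default), malloc_usable_size(p) reports
// the size originally passed to malloc(); with false, it reports everything
// up to the end of the underlying block.
```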
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index c9ba28a52f780..6141f27f723cb 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -706,19 +706,24 @@ class Allocator {
         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
           return;
-      } else {
-        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
-          return;
-      }
-      if (Header.State == Chunk::State::Allocated) {
-        uptr TaggedChunk = Chunk;
-        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
-          TaggedChunk = untagPointer(TaggedChunk);
-        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
-          TaggedChunk = loadTag(Chunk);
-        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
-                 Arg);
-      }
+      } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+        return;
+
+      if (Header.State != Chunk::State::Allocated)
+        return;
+
+      uptr TaggedChunk = Chunk;
+      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+        TaggedChunk = untagPointer(TaggedChunk);
+      uptr Size;
+      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+        TaggedChunk = loadTag(Chunk);
+        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+      } else if (AllocatorConfig::getExactUsableSize())
+        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+      else
+        Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
+      Callback(TaggedChunk, Size, Arg);
     };
     Primary.iterateOverBlocks(Lambda);
     Secondary.iterateOverBlocks(Lambda);
@@ -759,16 +764,50 @@ class Allocator {
     return false;
   }
 
-  // Return the usable size for a given chunk. Technically we lie, as we just
-  // report the actual size of a chunk. This is done to counteract code actively
-  // writing past the end of a chunk (like sqlite3) when the usable size allows
-  // for it, which then forces realloc to copy the usable size of a chunk as
-  // opposed to its actual size.
+  ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+                                   Chunk::UnpackedHeader *Header) {
+    void *BlockBegin = getBlockBegin(Ptr, Header);
+    if (LIKELY(Header->ClassId)) {
+      return SizeClassMap::getSizeByClassId(Header->ClassId) -
+             (reinterpret_cast<uptr>(Ptr) -
+              reinterpret_cast<uptr>(BlockBegin));
+    }
+
+    uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+      UntaggedPtr = untagPointer(UntaggedPtr);
+      BlockBegin = untagPointer(BlockBegin);
+    }
+    return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+  }
+
+  // Return the usable size for a given chunk. If MTE is enabled or if the
+  // ExactUsableSize config parameter is true, we report the exact size of
+  // the original allocation; otherwise, the total usable size.
   uptr getUsableSize(const void *Ptr) {
     if (UNLIKELY(!Ptr))
       return 0;
 
-    return getAllocSize(Ptr);
+    if (AllocatorConfig::getExactUsableSize() ||
+        UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+      return getAllocSize(Ptr);
+
+    initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+      return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+
+    // Getting the usable size of a chunk only makes sense if it's allocated.
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+    return getUsableSize(Ptr, &Header);
   }
 
   uptr getAllocSize(const void *Ptr) {
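To make the new helper's arithmetic concrete, here is a small worked example; the 16-byte chunk header and the addresses are illustrative assumptions for a 64-bit build, not values asserted by this patch:

```cpp
// Primary (size-class) chunk, hypothetical numbers:
//   size class          = 1024 bytes
//   BlockBegin          = 0x1000
//   Ptr (user pointer)  = 0x1010  (BlockBegin + 16-byte header)
//   usable              = 1024 - (0x1010 - 0x1000) = 1008 bytes
// i.e. everything from Ptr to the end of the 1024-byte block.
//
// Secondary (map-backed) chunk:
//   usable = SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr
// so the usable region extends to the page-aligned end of the mapping,
// not just to the originally requested size.
```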
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 1eff9ebcb7a4f..ab466539dc314 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -24,6 +24,7 @@
 #include <set>
 #include <stdlib.h>
 #include <thread>
+#include <unordered_map>
 #include <vector>
 
 static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
@@ -1161,3 +1162,238 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
   // No quarantine stats should be present.
   EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
 }
+
+struct UsableSizeClassConfig {
+  static const scudo::uptr NumBits = 1;
+  static const scudo::uptr MinSizeLog = 10;
+  static const scudo::uptr MidSizeLog = 10;
+  static const scudo::uptr MaxSizeLog = 13;
+  static const scudo::u16 MaxNumCachedHint = 8;
+  static const scudo::uptr MaxBytesCachedLog = 12;
+  static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = false;
+  static const bool QuarantineDisabled = true;
+
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // In order to properly test the usable size, this Primary config has
+    // four real size classes: 1024, 2048, 4096, 8192.
+    using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = 21U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+  // Scan through all sizes up to 10000 then some larger sizes.
+  for (scudo::uptr Size = 1; Size < 10000; Size++) {
+    void *P = Allocator.allocate(Size, Origin);
+    EXPECT_EQ(Size, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << Size;
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify that aligned allocations also return the exact size allocated.
+  const scudo::uptr AllocSize = 313;
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+    EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << AllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify an explicitly large allocation.
+  const scudo::uptr LargeAllocSize = 1000000;
+  void *P = Allocator.allocate(LargeAllocSize, Origin);
+  EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+  Allocator.deallocate(P, Origin);
+
+  // Now do the same for aligned large allocations.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << LargeAllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+  // This does not verify whether the size is the exact size or the size of
+  // the size class. Instead, verify that the size matches the usable size
+  // and assume the other tests have verified getUsableSize.
+  std::unordered_map<void *, size_t> Pointers;
+  Pointers.insert({Allocator.allocate(128, Origin), 0U});
+  Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+  Allocator.disable();
+  Allocator.iterateOverChunks(
+      0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+      [](uintptr_t Base, size_t Size, void *Arg) {
+        std::unordered_map<void *, size_t> *Pointers =
+            reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+        (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+      },
+      reinterpret_cast<void *>(&Pointers));
+  Allocator.enable();
+
+  for (auto [Ptr, IterateSize] : Pointers) {
+    EXPECT_NE(0U, IterateSize)
+        << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+    EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+        << "Pointer " << Ptr
+        << " mismatch between iterate size and usable size.";
+    Allocator.deallocate(Ptr, Origin);
+  }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+template <class AllocatorT> void VerifyUsableSize(AllocatorT &Allocator) {
+  // Check primary allocations first.
+  std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+  scudo::uptr StartSize = 0;
+  for (auto SizeClass : SizeClasses) {
+    scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+    for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+      void *P = Allocator.allocate(Size, Origin);
+      EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+          << "Failed usable size at allocation size " << Size
+          << " for size class " << SizeClass;
+      Allocator.deallocate(P, Origin);
+    }
+    StartSize = UsableSize + 1;
+  }
+
+  // Check different alignments to verify usable space is calculated
+  // properly. Currently, the pointer plus usable size is aligned to the
+  // size class size.
+  const scudo::uptr AllocSize = 128;
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 32));
+  void *P = Allocator.allocate(AllocSize, Origin, 32);
+  scudo::uptr UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+  Allocator.deallocate(P, Origin);
+
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 64));
+  P = Allocator.allocate(AllocSize, Origin, 64);
+  UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+  Allocator.deallocate(P, Origin);
+
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 128));
+  P = Allocator.allocate(AllocSize, Origin, 128);
+  UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+  Allocator.deallocate(P, Origin);
+
+  // Check allocations in the secondary, where the end of the allocation is
+  // always aligned to a page.
+  const scudo::uptr LargeAllocSize = 996780;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  P = Allocator.allocate(LargeAllocSize, Origin);
+  UsableSize = Allocator.getUsableSize(P);
+  EXPECT_GE(UsableSize, LargeAllocSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+  Allocator.deallocate(P, Origin);
+
+  // Check aligned allocations now.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    UsableSize = Allocator.getUsableSize(P);
+    EXPECT_GE(UsableSize, LargeAllocSize);
+    EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+  static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  // When MTE is enabled, exact sizes are returned.
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
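The removed combined.h comment explains why exact sizes were reported historically: code like sqlite3 writes out to `malloc_usable_size(p)` bytes, which then forces realloc to copy the full usable size. A caller-side sketch of the grow-in-place pattern that the full-size mode supports (illustrative only, not code from this patch):

```cpp
#include <malloc.h>
#include <stdlib.h>

// With ExactUsableSize = false, the reported capacity covers the whole
// block, so small growths can skip realloc entirely.
void *grow(void *Buf, size_t Needed) {
  if (Buf != NULL && Needed <= malloc_usable_size(Buf))
    return Buf;                  // the block's tail bytes are usable as-is
  return realloc(Buf, Needed);   // otherwise actually reallocate
}
```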
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 612317b3c3293..9e5d0658e5ed5 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {
   EXPECT_EQ(errno, 0);
   fclose(F);
   EXPECT_EQ(strncmp(Buffer, "