[scudo] Add config option to modify get usable size behavior #158710
```diff
@@ -706,19 +706,24 @@ class Allocator {
       if (!getChunkFromBlock(Block, &Chunk, &Header) &&
           !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
         return;
-    } else {
-      if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
-        return;
-    }
-    if (Header.State == Chunk::State::Allocated) {
-      uptr TaggedChunk = Chunk;
-      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
-        TaggedChunk = untagPointer(TaggedChunk);
-      if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
-        TaggedChunk = loadTag(Chunk);
-      Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
-               Arg);
-    }
+    } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+      return;
+
+    if (Header.State != Chunk::State::Allocated)
+      return;
+
+    uptr TaggedChunk = Chunk;
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+      TaggedChunk = untagPointer(TaggedChunk);
+    uptr Size;
+    if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+      TaggedChunk = loadTag(Chunk);
+      Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+    } else if (AllocatorConfig::getExactUsableSize())
+      Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+    else
+      Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
+
+    Callback(TaggedChunk, Size, Arg);
   };
   Primary.iterateOverBlocks(Lambda);
   Secondary.iterateOverBlocks(Lambda);
@@ -759,16 +764,50 @@ class Allocator {
     return false;
   }
 
-  // Return the usable size for a given chunk. Technically we lie, as we just
-  // report the actual size of a chunk. This is done to counteract code actively
-  // writing past the end of a chunk (like sqlite3) when the usable size allows
-  // for it, which then forces realloc to copy the usable size of a chunk as
-  // opposed to its actual size.
+  ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+                                   Chunk::UnpackedHeader *Header) {
+    void *BlockBegin = getBlockBegin(Ptr, Header);
+    if (LIKELY(Header->ClassId)) {
+      return SizeClassMap::getSizeByClassId(Header->ClassId) -
+             (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
+    }
+
+    uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+      UntaggedPtr = untagPointer(UntaggedPtr);
+      BlockBegin = untagPointer(BlockBegin);
+    }
+    return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+  }
+
+  // Return the usable size for a given chunk. If MTE is enabled or if the
+  // ExactUsableSize config parameter is true, we report the exact size of
+  // the original allocation. Otherwise, we return the total actual usable
+  // size of the chunk.
   uptr getUsableSize(const void *Ptr) {
     if (UNLIKELY(!Ptr))
       return 0;
 
-    return getAllocSize(Ptr);
+    if (AllocatorConfig::getExactUsableSize() ||
+        UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+      return getAllocSize(Ptr);
+
+    initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+      return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+
+    // Getting the alloc size of a chunk only makes sense if it's allocated.
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+    return getUsableSize(Ptr, &Header);
  }
 
   uptr getAllocSize(const void *Ptr) {
```
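The comment removed above records the original motivation: callers such as sqlite3 may legitimately write up to `malloc_usable_size(p)` bytes, which in turn obliges `realloc` to copy that many bytes. A small standalone sketch of that caller pattern, using the plain libc API rather than anything from this patch:

```cpp
#include <malloc.h> // malloc_usable_size (glibc/Bionic extension)
#include <cstdlib>
#include <cstring>

int main() {
  char *p = static_cast<char *>(malloc(100));
  if (!p)
    return 1;
  // Legal under the malloc_usable_size contract: the caller may use every
  // reported byte, even though only 100 were requested.
  size_t usable = malloc_usable_size(p);
  memset(p, 0xab, usable);
  // realloc must now preserve `usable` bytes, not just the 100 requested,
  // which is the copy cost the removed comment describes.
  p = static_cast<char *>(realloc(p, 200000));
  free(p);
  return 0;
}
```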
> **Review comment:** Just a minor comment (it's not necessary to take the suggestions here). I'm wondering if we can simplify the test like … We don't test different alignments or some random sizes because I think 2. is enough for the usable size.
```diff
@@ -24,6 +24,7 @@
 #include <set>
 #include <stdlib.h>
 #include <thread>
+#include <unordered_map>
 #include <vector>
 
 static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
@@ -1161,3 +1162,238 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
   // No quarantine stats should not be present.
   EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
 }
+
+struct UsableSizeClassConfig {
+  static const scudo::uptr NumBits = 1;
+  static const scudo::uptr MinSizeLog = 10;
+  static const scudo::uptr MidSizeLog = 10;
+  static const scudo::uptr MaxSizeLog = 13;
+  static const scudo::u16 MaxNumCachedHint = 8;
+  static const scudo::uptr MaxBytesCachedLog = 12;
+  static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = false;
+  static const bool QuarantineDisabled = true;
+
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // In order to properly test the usable size, this Primary config has
+    // four real size classes: 1024, 2048, 4096, 8192.
+    using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = 21U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+  // Scan through all sizes up to 10000, then some larger sizes.
+  for (scudo::uptr Size = 1; Size < 10000; Size++) {
+    void *P = Allocator.allocate(Size, Origin);
+    EXPECT_EQ(Size, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << Size;
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify that aligned allocations also return the exact size allocated.
+  const scudo::uptr AllocSize = 313;
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+    EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << AllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify an explicitly large allocation.
+  const scudo::uptr LargeAllocSize = 1000000;
+  void *P = Allocator.allocate(LargeAllocSize, Origin);
+  EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+  Allocator.deallocate(P, Origin);
+
+  // Now do the same for aligned large allocations.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << LargeAllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+  // This does not verify whether the size is the exact size or the size of
+  // the size class. Instead, verify that the size matches the usable size
+  // and assume the other tests have verified getUsableSize.
+  std::unordered_map<void *, size_t> Pointers;
+  Pointers.insert({Allocator.allocate(128, Origin), 0U});
+  Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+  Allocator.disable();
+  Allocator.iterateOverChunks(
+      0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+      [](uintptr_t Base, size_t Size, void *Arg) {
+        std::unordered_map<void *, size_t> *Pointers =
+            reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+        (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+      },
+      reinterpret_cast<void *>(&Pointers));
+  Allocator.enable();
+
+  for (auto [Ptr, IterateSize] : Pointers) {
+    EXPECT_NE(0U, IterateSize)
+        << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+    EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+        << "Pointer " << Ptr
+        << " mismatch between iterate size and usable size.";
+    Allocator.deallocate(Ptr, Origin);
+  }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+template <class AllocatorT> void VerifyUsableSize(AllocatorT &Allocator) {
+  // Check primary allocations first.
+  std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+  scudo::uptr StartSize = 0;
+  for (auto SizeClass : SizeClasses) {
+    scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+    for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+      void *P = Allocator.allocate(Size, Origin);
+      EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+          << "Failed usable size at allocation size " << Size
+          << " for size class " << SizeClass;
+      Allocator.deallocate(P, Origin);
+    }
+    StartSize = UsableSize + 1;
+  }
+
+  // Check different alignments to verify usable space is calculated properly.
+  // Currently, the pointer plus usable size is aligned to the size class size.
+  const scudo::uptr AllocSize = 128;
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(128, 32));
+  void *P = Allocator.allocate(AllocSize, Origin, 32);
+  scudo::uptr UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
```
> **Review comment:** I guess this checks that P + UsableSize will be the end of the block, right? If so, I think this can be wrong if the base address of a block is not aligned to 1024. But the test passes, so maybe I'm misunderstanding something.
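To spell out the arithmetic behind this question, a small worked sketch; the base address and header size are assumed values, not scudo internals:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical in-block layout: Base is the block begin, H the header size.
  const uintptr_t Base = 0x10000400; // 1024-aligned in this example
  const uintptr_t H = 16;            // assumed chunk header size
  const uintptr_t P = Base + H;      // pointer returned to the user
  const uintptr_t UsableSize = 1024 - H;
  // P + UsableSize lands exactly at Base + 1024, so the % 1024 check in the
  // test only holds when Base itself is 1024-aligned.
  assert((P + UsableSize) % 1024 == 0);
  return 0;
}
```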
```diff
+  Allocator.deallocate(P, Origin);
+
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 64));
+  P = Allocator.allocate(AllocSize, Origin, 64);
+  UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+  Allocator.deallocate(P, Origin);
+
+  EXPECT_TRUE(isPrimaryAllocation<AllocatorT>(AllocSize, 128));
+  P = Allocator.allocate(AllocSize, Origin, 128);
+  UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
```
> **Review comment:** Why do we need to dirty the page here?
```diff
+  EXPECT_GE(UsableSize, AllocSize);
+  EXPECT_GE(1024 - scudo::Chunk::getHeaderSize(), UsableSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % 1024);
+  Allocator.deallocate(P, Origin);
+
+  // Check allocations in the secondary; the end of the allocation is always
+  // aligned to a page.
+  const scudo::uptr LargeAllocSize = 996780;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  P = Allocator.allocate(LargeAllocSize, Origin);
+  UsableSize = Allocator.getUsableSize(P);
+  EXPECT_GE(UsableSize, LargeAllocSize);
+  EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+  Allocator.deallocate(P, Origin);
+
+  // Check aligned allocations now.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    UsableSize = Allocator.getUsableSize(P);
+    EXPECT_GE(UsableSize, LargeAllocSize);
+    EXPECT_EQ(0U, (reinterpret_cast<scudo::uptr>(P) + UsableSize) % PageSize);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+  static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  // When MTE is enabled, you get exact sizes.
+  VerifyExactUsableSize<AllocatorT>(*Allocator);
+  VerifyIterateOverUsableSize<AllocatorT>(*Allocator);
+}
```
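Taken together, the tests above pin down the caller-visible contract. A condensed sketch, assuming the four-class config above and an illustrative 16-byte chunk header (the exact numbers depend on the config and header size):

```cpp
void *P = Allocator.allocate(300, Origin);

// TestExactUsableSizeConfig (or any config with MTE active):
//   Allocator.getUsableSize(P) == 300        // the request size
// TestFullUsableSizeConfig (ExactUsableSize == false):
//   Allocator.getUsableSize(P) == 1024 - 16  // rest of the 1024-byte block
Allocator.deallocate(P, Origin);
```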
> **Review comment:** I'm wondering if we want to make this enabled when MTE is on and disabled otherwise, and only make it configurable once we get more evidence that this is useful. What do you think?

> **Reply:** The problem is that for aarch64 it's always going to be "maybe MTE is supported", so it would have to be a runtime check.

> **Reply:** Maybe I'm missing something. I mean something like: `ExactUsableSize` is replaced by `useMemoryTagging<AllocatorConfig>(Primary.Options.load())`.

> **Reply:** You mean remove the configuration parameter completely and always use the real usable size when MTE is not on? I'm not opposed to that, but I do worry that it changes the behavior of existing configs.

> **Reply:** Yes, I was thinking about how useful this flag really is. For people who don't use the usable size, either size is fine and it doesn't give a direct security benefit. So reporting the block size by default seems to avoid a flag that will be barely used (or hard to understand when to use). No strong opinion here; I'll leave you to make the decision.
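For reference, a hedged sketch of the alternative being discussed: drop the `ExactUsableSize` knob entirely and key the behavior off the runtime MTE state. This is a hypothetical rewrite, not code from the patch:

```cpp
uptr getUsableSize(const void *Ptr) {
  if (UNLIKELY(!Ptr))
    return 0;
  // Exact size whenever memory tagging is active; otherwise always report
  // the block-derived usable size. No ExactUsableSize config involved.
  if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
    return getAllocSize(Ptr);
  // ... block-size path as in the patch above ...
}
```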