[scudo] Allow the quarantine code to be compiled out #151064
Conversation
Add a new configuration option, QuarantineDisabled, that allows all of the quarantine code to be compiled out, and add new tests that verify the code is removed properly. On Android, this saves ~4000 bytes for 32-bit and ~6000 bytes for 64-bit. Using Android microbenchmarks that do malloc/free in a loop, performance for allocations in the primary is about the same on both 32-bit and 64-bit. For secondary allocations, I saw a ~8% speedup on 32-bit and a ~3% speedup on 64-bit, which may just come from the code size improvements.
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Christopher Ferris (cferris1000)
Full diff: https://github.com/llvm/llvm-project/pull/151064.diff (5 files affected)
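For readers unfamiliar with the scudo config machinery, here is a minimal, self-contained sketch of the pattern this change relies on. The config names below are invented for illustration; real scudo configs carry many more required members. Because the option is surfaced as a compile-time constant, the if (!AllocatorConfig::getQuarantineDisabled()) guards fold away and the quarantine paths are eliminated from the binary.

#include <cstdio>

// Illustrative configs only (not the real scudo ones): the option is a
// compile-time constant on the config type.
struct DefaultConfig {
  static constexpr bool QuarantineDisabled = false;
};
struct NoQuarantineConfig {
  static constexpr bool QuarantineDisabled = true;
};

template <class Config> void deallocateSketch() {
  // The condition is a compile-time constant, so the compiler folds the
  // branch and drops the quarantine path entirely when QuarantineDisabled
  // is true. This is the same pattern as the guards added in combined.h.
  if (!Config::QuarantineDisabled)
    std::puts("quarantine path");
  else
    std::puts("bypass quarantine");
}

int main() {
  deallocateSketch<DefaultConfig>();      // takes the quarantine path
  deallocateSketch<NoQuarantineConfig>(); // quarantine branch compiled out
  return 0;
}

An allocator config opts out the same way the new TestQuarantineDisabledConfig in the diff does, by adding static const bool QuarantineDisabled = true; at the base-config level.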
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 84fcec0877d40..748530820cd64 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -54,6 +54,9 @@ BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT)
// Indicates possible support for Memory Tagging.
BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
+// Disable the quarantine code.
+BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+
// PRIMARY_REQUIRED_TYPE(NAME)
//
// SizeClassMap to use with the Primary.
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
index ac639ee9dd943..5bfa700c2f8d8 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -60,6 +60,10 @@ template <typename AllocatorConfig> struct PrimaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define PRIMARY_REQUIRED_TYPE(NAME) \
using NAME = typename AllocatorConfig::Primary::NAME;
@@ -92,6 +96,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \
template <typename T> \
using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
@@ -111,6 +119,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT) \
OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, Cache::NAME) \
static constexpr removeConst<TYPE>::type get##NAME() { \
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 87acdec2a3bac..48a30ad0306ec 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -184,9 +184,10 @@ class Allocator {
const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
Primary.init(ReleaseToOsIntervalMs);
Secondary.init(&Stats, ReleaseToOsIntervalMs);
- Quarantine.init(
- static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
- static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.init(
+ static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+ static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
}
void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
@@ -276,16 +277,18 @@ class Allocator {
// the last two items).
void commitBack(TSD<ThisT> *TSD) {
TSD->assertLocked(/*BypassCheck=*/true);
- Quarantine.drain(&TSD->getQuarantineCache(),
- QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.drain(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getSizeClassAllocator()));
TSD->getSizeClassAllocator().destroy(&Stats);
}
void drainCache(TSD<ThisT> *TSD) {
TSD->assertLocked(/*BypassCheck=*/true);
- Quarantine.drainAndRecycle(
- &TSD->getQuarantineCache(),
- QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.drainAndRecycle(
+ &TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getSizeClassAllocator()));
TSD->getSizeClassAllocator().drain();
}
void drainCaches() { TSDRegistry.drainCaches(this); }
@@ -612,7 +615,8 @@ class Allocator {
#endif
TSDRegistry.disable();
Stats.disable();
- Quarantine.disable();
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.disable();
Primary.disable();
Secondary.disable();
disableRingBuffer();
@@ -623,7 +627,8 @@ class Allocator {
enableRingBuffer();
Secondary.enable();
Primary.enable();
- Quarantine.enable();
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.enable();
Stats.enable();
TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
@@ -1252,7 +1257,8 @@ class Allocator {
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
// than the maximum allowed, we return a chunk directly to the backend.
// This purposefully underflows for Size == 0.
- const bool BypassQuarantine = !Quarantine.getCacheSize() ||
+ const bool BypassQuarantine = AllocatorConfig::getQuarantineDisabled() ||
+ !Quarantine.getCacheSize() ||
((Size - 1) >= QuarantineMaxChunkSize) ||
!Header->ClassId;
if (BypassQuarantine)
@@ -1642,7 +1648,8 @@ class Allocator {
uptr getStats(ScopedString *Str) {
Primary.getStats(Str);
Secondary.getStats(Str);
- Quarantine.getStats(Str);
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.getStats(Str);
TSDRegistry.getStats(Str);
return Str->length();
}
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f04c5b7cfc2ea..38c9a9e6e2d70 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -312,7 +312,7 @@ class MapAllocatorCache {
break;
}
- if (Config::getQuarantineSize()) {
+ if (!Config::getQuarantineDisabled() && Config::getQuarantineSize()) {
QuarantinePos =
(QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
if (!Quarantine[QuarantinePos].isValid()) {
@@ -508,14 +508,16 @@ class MapAllocatorCache {
void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
- if (Quarantine[I].isValid()) {
- MemMapT &MemMap = Quarantine[I].MemMap;
- unmapCallBack(MemMap);
- Quarantine[I].invalidate();
+ if (!Config::getQuarantineDisabled()) {
+ for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+ if (Quarantine[I].isValid()) {
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ unmapCallBack(MemMap);
+ Quarantine[I].invalidate();
+ }
}
+ QuarantinePos = -1U;
}
- QuarantinePos = -1U;
for (CachedBlock &Entry : LRUEntries)
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
@@ -575,8 +577,9 @@ class MapAllocatorCache {
if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
- for (uptr I = 0; I < Config::getQuarantineSize(); I++)
- releaseIfOlderThan(Quarantine[I], Time);
+ if (!Config::getQuarantineDisabled())
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++)
+ releaseIfOlderThan(Quarantine[I], Time);
for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
releaseIfOlderThan(Entries[I], Time);
}
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 7e8d5b4396d2e..e30c53c1e7e7b 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -623,20 +623,20 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
auto *Allocator = this->Allocator.get();
- scudo::uptr BufferSize = 8192;
- std::vector<char> Buffer(BufferSize);
- scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
- while (ActualSize > BufferSize) {
- BufferSize = ActualSize + 1024;
- Buffer.resize(BufferSize);
- ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
}
- std::string Stats(Buffer.begin(), Buffer.end());
+ EXPECT_GE(Stats.size(), ActualSize);
+
// Basic checks on the contents of the statistics output, which also allows us
// to verify that we got it all.
EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
- EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+ // Do not explicitly check for quarantine stats since a config can disable
+ // them. Other tests verify this (QuarantineEnabled/QuarantineDisabled).
}
SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, Drain) {
@@ -1076,3 +1076,88 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
#endif
#endif
+
+struct TestQuarantineSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestQuarantineConfig {
+ static const bool MaySupportMemoryTagging = false;
+
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+ struct Primary {
+    // Tiny allocator whose Primary only serves chunks of four sizes.
+ using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+// Verify that the quarantine exists by default.
+TEST(ScudoCombinedTest, QuarantineEnabled) {
+ using AllocatorT = scudo::Allocator<TestQuarantineConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ const scudo::uptr Size = 1000U;
+ void *P = Allocator->allocate(Size, Origin);
+ EXPECT_NE(P, nullptr);
+ Allocator->deallocate(P, Origin);
+
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ }
+ EXPECT_GE(Stats.size(), ActualSize);
+
+  // Quarantine stats should exist since the quarantine is enabled by default.
+ EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+}
+
+struct TestQuarantineDisabledConfig : TestQuarantineConfig {
+ static const bool QuarantineDisabled = true;
+};
+
+TEST(ScudoCombinedTest, QuarantineDisabled) {
+ using AllocatorT = scudo::Allocator<TestQuarantineDisabledConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ const scudo::uptr Size = 1000U;
+ void *P = Allocator->allocate(Size, Origin);
+ EXPECT_NE(P, nullptr);
+ Allocator->deallocate(P, Origin);
+
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ }
+ EXPECT_GE(Stats.size(), ActualSize);
+
+ // No quarantine stats should exist.
+ EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
+}
ChiaHungDuan
left a comment
Not sure if we want to do it like MapAllocatorNoCache, but either way looks fine to me.
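(For reference, a hypothetical sketch of the alternative alluded to here, not what this PR implements: the config would select a stub quarantine type with empty methods, in the same spirit as MapAllocatorNoCache, instead of guarding each call site with a boolean. All names below are invented for illustration and the stub covers only a minimal slice of the real interface.)

#include <cstddef>

// Hypothetical no-op quarantine with a minimal quarantine-like surface.
struct NoQuarantine {
  void init(std::size_t, std::size_t) {}
  std::size_t getCacheSize() const { return 0; }
  void enable() {}
  void disable() {}
};

// The allocator would pick the quarantine type from the config, so empty
// calls inline away without any if-guards at the call sites.
template <class Config> struct AllocatorSketch {
  typename Config::QuarantineT Quarantine;
  void init() { Quarantine.init(0, 0); } // no-op when the stub is selected
};

struct StubQuarantineConfig {
  using QuarantineT = NoQuarantine;
};

int main() {
  AllocatorSketch<StubQuarantineConfig> A;
  A.init();
  return 0;
}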
if (!AllocatorConfig::getQuarantineDisabled())
  Quarantine.init(
      static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
      static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
nit: brackets
Fixed.
TSD->assertLocked(/*BypassCheck=*/true);
Quarantine.drain(&TSD->getQuarantineCache(),
                 QuarantineCallback(*this, TSD->getSizeClassAllocator()));
if (!AllocatorConfig::getQuarantineDisabled())
nit: brackets
Fixed.
if (!AllocatorConfig::getQuarantineDisabled())
  Quarantine.drainAndRecycle(
      &TSD->getQuarantineCache(),
      QuarantineCallback(*this, TSD->getSizeClassAllocator()));
nit: brackets
Fixed.