diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
index 6b952a610e305..c6bd32a8fa325 100644
--- a/compiler-rt/lib/scudo/standalone/list.h
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -61,10 +61,11 @@ template <class T> class LinkOp<T, /*LinkWithPtr=*/false> {
   using LinkTy = decltype(T::Next);
 
   LinkOp() = default;
-  LinkOp(T *BaseT, uptr BaseSize) : Base(BaseT), Size(BaseSize) {}
+  // TODO: Check if the `BaseSize` can fit in `Size`.
+  LinkOp(T *BaseT, uptr BaseSize)
+      : Base(BaseT), Size(static_cast<LinkTy>(BaseSize)) {}
   void init(T *LinkBase, uptr BaseSize) {
     Base = LinkBase;
-    // TODO: Check if the `BaseSize` can fit in `Size`.
     Size = static_cast<LinkTy>(BaseSize);
   }
   T *getBase() const { return Base; }
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 2fae29e5a2168..25b82356ca20f 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -71,7 +71,8 @@ namespace {
 
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
-  static constexpr u16 InvalidEntry = CacheIndexMax;
+  static constexpr u16 EndOfListVal = CacheIndexMax;
+
   // We allow a certain amount of fragmentation and part of the fragmented bytes
   // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
   // of cache hit rate and reduces the overhead to the RSS at the same time. See
@@ -206,17 +207,16 @@ class MapAllocatorCache {
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %d, "
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
         "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
         atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
+    for (CachedBlock &Entry : LRUEntries) {
       Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
                 "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
+    DCHECK_EQ(LRUEntries.size(), 0U);
     setOption(Option::MaxCacheEntriesCount,
               static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
-
-    // Available entries will be retrieved starting from the beginning of the
-    // Entries array
-    AvailableHead = 0;
-    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
-      Entries[I].Next = static_cast<u16>(I + 1);
+    LRUEntries.clear();
+    LRUEntries.init(Entries, sizeof(Entries));
 
-    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+    AvailEntries.clear();
+    AvailEntries.init(Entries, sizeof(Entries));
+    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+      AvailEntries.push_back(&Entries[I]);
   }
 
   void store(const Options &Options, uptr CommitBase, uptr CommitSize,
@@ -326,11 +322,15 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }
 
-      // All excess entries are evicted from the cache
-      while (needToEvict()) {
+      // All excess entries are evicted from the cache. Note that when
+      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
+      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
+      // won't try to pop `LRUEntries` when it's empty.
+      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        CachedBlock *Entry = LRUEntries.back();
+        EvictionMemMaps.push_back(Entry->MemMap);
+        remove(Entry);
       }
 
       insert(Entry);
@@ -360,9 +360,9 @@ class MapAllocatorCache {
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
-      if (EntriesCount == 0)
+      if (LRUEntries.size() == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      CachedBlock *RetrievedEntry = nullptr;
       uptr MinDiff = UINTPTR_MAX;
 
       // Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +382,9 @@ class MapAllocatorCache {
       // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
      // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      // not considered valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
+      for (CachedBlock &Entry : LRUEntries) {
+        const uptr CommitBase = Entry.CommitBase;
+        const uptr CommitSize = Entry.CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +407,7 @@ class MapAllocatorCache {
           continue;
 
         MinDiff = Diff;
-        RetrievedIndex = I;
+        RetrievedEntry = &Entry;
         EntryHeaderPos = HeaderPos;
 
         // Immediately use a cached block if its size is close enough to the
@@ -418,9 +417,10 @@ class MapAllocatorCache {
         if (Diff <= OptimalFitThesholdBytes)
           break;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+
+      if (RetrievedEntry != nullptr) {
+        Entry = *RetrievedEntry;
+        remove(RetrievedEntry);
         SuccessfulRetrieves++;
       }
     }
@@ -499,10 +499,8 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    for (CachedBlock &Entry : LRUEntries)
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     QuarantinePos = -1U;
   }
 
@@ -513,64 +511,19 @@ class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
-  bool needToEvict() REQUIRES(Mutex) {
-    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
-  }
-
   void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
-    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
-    // Cache should be populated with valid entries when not empty
-    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
-    u32 FreeIndex = AvailableHead;
-    AvailableHead = Entries[AvailableHead].Next;
-
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
+    CachedBlock *AvailEntry = AvailEntries.front();
+    AvailEntries.pop_front();
 
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
-    EntriesCount++;
-
-    // Availability stack should not have available entries when all entries
-    // are in use
-    if (EntriesCount == Config::getEntriesArraySize())
-      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+    *AvailEntry = Entry;
+    LRUEntries.push_front(AvailEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
-    else
-      Entries[Entries[I].Prev].Next = Entries[I].Next;
-
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
-    else
-      Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
-    Entries[I].Next = AvailableHead;
-    AvailableHead = static_cast<u16>(I);
-    EntriesCount--;
-
-    // Cache should not have valid entries when not empty
-    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
-    }
+  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+    DCHECK(Entry->isValid());
+    LRUEntries.remove(Entry);
+    Entry->invalidate();
+    AvailEntries.push_front(Entry);
   }
 
   void empty() {
@@ -578,14 +531,10 @@ class MapAllocatorCache {
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
-      EntriesCount = 0;
+
+      for (CachedBlock &Entry : LRUEntries)
+        MapInfo[N++] = Entry.MemMap;
+      LRUEntries.clear();
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -607,7 +556,7 @@ class MapAllocatorCache {
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +566,6 @@ class MapAllocatorCache {
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
@@ -630,12 +578,10 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
-  // The AvailableHead is the top of the stack of available entries
-  u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  // Cached blocks stored in LRU order
+  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+  // The unused Entries
+  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
 };
 
 template <typename Config> class MapAllocator {
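
Illustration of the data-structure change (not part of the patch): the hand-rolled index-based bookkeeping (LRUHead/LRUTail/AvailableHead plus EntriesCount) is replaced by scudo's intrusive DoublyLinkedList for the LRU order and a SinglyLinkedList for the unused slots. The standalone sketch below models the same init/store/retrieve/remove flow, with std::list standing in for the intrusive lists; the Block and LruCache names, the capacities, and the main() driver are invented for the example and are not scudo APIs.

// Sketch only: models the LRU-cache bookkeeping introduced by the patch,
// using std::list instead of scudo's intrusive DoublyLinkedList/SinglyLinkedList.
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <list>

struct Block {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
  bool Valid = false;
};

class LruCache {
public:
  // Mirrors init(): every slot of the fixed array starts on the available list.
  LruCache() {
    for (Block &B : Slots)
      Avail.push_back(&B);
  }

  // Mirrors store(): evict from the LRU tail while the cache is full, then
  // reuse a free slot and push it to the LRU head (most recently used).
  void store(uintptr_t Base, uintptr_t Size) {
    while (Lru.size() >= MaxEntries)
      remove(Lru.back());
    Block *Slot = Avail.front();
    Avail.pop_front();
    Slot->CommitBase = Base;
    Slot->CommitSize = Size;
    Slot->Valid = true;
    Lru.push_front(Slot);
  }

  // Mirrors retrieve(): walk from most- to least-recently-used and take the
  // first block large enough for the request.
  bool retrieve(uintptr_t Size, uintptr_t &Base) {
    for (Block *B : Lru) {
      if (B->CommitSize >= Size) {
        Base = B->CommitBase;
        remove(B);
        return true;
      }
    }
    return false;
  }

private:
  // Mirrors remove(): unlink from the LRU list, invalidate the slot, and
  // return it to the available list.
  void remove(Block *B) {
    assert(B->Valid);
    Lru.remove(B);
    B->Valid = false;
    Avail.push_front(B);
  }

  static constexpr size_t MaxEntries = 4;
  std::array<Block, 8> Slots{};
  std::list<Block *> Lru;   // most recent at front, least recent at back
  std::list<Block *> Avail; // unused slots
};

int main() {
  LruCache Cache;
  Cache.store(0x1000, 4096);
  Cache.store(0x2000, 8192);
  uintptr_t Base = 0;
  assert(Cache.retrieve(8192, Base) && Base == 0x2000);
  return 0;
}

The point mirrored here is that insert() and remove() collapse to push_front/pop_front/remove calls on the two lists, so the index arithmetic and the separate EntriesCount counter disappear; in the patch, LRUEntries.size() takes over that counter's role.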