@@ -71,7 +71,8 @@ namespace {
 
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
-  static constexpr u16 InvalidEntry = CacheIndexMax;
+  static constexpr scudo::uptr EndOfListVal = CacheIndexMax;
+
   // We allow a certain amount of fragmentation and part of the fragmented bytes
   // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
   // of cache hit rate and reduces the overhead to the RSS at the same time. See
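
The hunk above retires the `InvalidEntry` index sentinel in favour of `EndOfListVal`, the value used to mark "no next element" when cache blocks are linked by array indices. A minimal, self-contained sketch of that sentinel idea follows; the `Node` type and its fields are illustrative assumptions, not scudo's actual `CachedBlock` or `list.h` code.

#include <cassert>
#include <cstdint>

// Illustrative model only: array-backed nodes linked by u16 indices,
// with a reserved sentinel value meaning "end of list".
struct Node {
  static constexpr uint16_t EndOfListVal = UINT16_MAX;
  uint16_t Next = EndOfListVal;
};

int main() {
  Node Pool[4];
  // Chain every pool slot: 0 -> 1 -> 2 -> 3 -> end.
  for (uint16_t I = 0; I + 1 < 4; I++)
    Pool[I].Next = static_cast<uint16_t>(I + 1);

  // Walk the chain until the sentinel is reached.
  int Count = 0;
  for (uint16_t I = 0; I != Node::EndOfListVal; I = Pool[I].Next)
    Count++;
  assert(Count == 4);
  return 0;
}
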
@@ -206,17 +207,16 @@ class MapAllocatorCache {
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %d, "
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
         "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
         atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
+    for (CachedBlock &Entry : LRUEntries) {
       Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
                 "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
+    DCHECK_EQ(LRUEntries.size(), 0U);
     setOption(Option::MaxCacheEntriesCount,
               static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
-
-    // Available entries will be retrieved starting from the beginning of the
-    // Entries array
-    AvailableHead = 0;
-    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
-      Entries[I].Next = static_cast<u16>(I + 1);
+    LRUEntries.clear();
+    LRUEntries.init(Entries, sizeof(Entries));
 
-    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+    AvailEntries.clear();
+    AvailEntries.init(Entries, sizeof(Entries));
+    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+      AvailEntries.push_back(&Entries[I]);
   }
 
   void store(const Options &Options, uptr CommitBase, uptr CommitSize,
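
With the hunk above, `init()` no longer hand-builds an index chain: both lists are cleared, bound to the `Entries` storage, and every slot is pushed onto `AvailEntries`, so grabbing a free cache slot becomes a constant-time pop. A standalone sketch of that free-list seeding, using a hypothetical pointer-based `FreeList` rather than scudo's real `SinglyLinkedList`:

#include <cassert>
#include <cstddef>

// Illustrative free-list model (not scudo's list.h): every slot of a fixed
// array starts on the available list; taking a slot pops it off the front.
struct Block {
  Block *Next = nullptr;
};

struct FreeList {
  Block *Head = nullptr;
  void push_front(Block *B) { B->Next = Head; Head = B; }
  Block *pop_front() {
    Block *B = Head;
    if (B)
      Head = B->Next;
    return B;
  }
};

int main() {
  Block Entries[8];
  FreeList Avail;
  // Mirror of init(): seed the available list with every array slot.
  for (size_t I = 0; I < 8; I++)
    Avail.push_front(&Entries[I]);

  // Taking a cache slot is now O(1): pop from the free list.
  Block *Slot = Avail.pop_front();
  assert(Slot != nullptr);
  return 0;
}
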
@@ -329,8 +325,9 @@ class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        CachedBlock *Entry = LRUEntries.back();
+        EvictionMemMaps.push_back(Entry->MemMap);
+        remove(Entry);
       }
 
       insert(Entry);
@@ -360,9 +357,9 @@ class MapAllocatorCache {
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
-      if (EntriesCount == 0)
+      if (LRUEntries.size() == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      CachedBlock *RetrievedEntry = nullptr;
       uptr MinDiff = UINTPTR_MAX;
 
       // Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +379,9 @@ class MapAllocatorCache {
       // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
       // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
       // not considered valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
+      for (CachedBlock &Entry : LRUEntries) {
+        const uptr CommitBase = Entry.CommitBase;
+        const uptr CommitSize = Entry.CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +404,7 @@ class MapAllocatorCache {
           continue;
 
         MinDiff = Diff;
-        RetrievedIndex = I;
+        RetrievedEntry = &Entry;
         EntryHeaderPos = HeaderPos;
 
         // Immediately use a cached block if its size is close enough to the
@@ -418,9 +414,10 @@ class MapAllocatorCache {
         if (Diff <= OptimalFitThesholdBytes)
           break;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+
+      if (RetrievedEntry != nullptr) {
+        Entry = *RetrievedEntry;
+        remove(RetrievedEntry);
         SuccessfulRetrieves++;
       }
     }
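
The loops above implement a best-fit scan over the LRU list: for each cached block, the allocation is placed at the end of the committed region, `Diff = HeaderPos - CommitBase` measures the fragmentation left at the front, the smallest `Diff` wins, and the scan stops early once the waste falls under `OptimalFitThesholdBytes`. A small worked example of that placement arithmetic, with invented values:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Round Addr down to a multiple of Boundary (Boundary is a power of two).
static uptr roundDown(uptr Addr, uptr Boundary) { return Addr & ~(Boundary - 1); }

int main() {
  // Invented example values: a 64 KiB cached mapping, a 20 KiB request.
  const uptr CommitBase = 0x10000;
  const uptr CommitSize = 0x10000;  // 64 KiB cached block
  const uptr Size = 0x5000;         // 20 KiB requested
  const uptr Alignment = 0x1000;    // 4 KiB pages
  const uptr HeadersSize = 0x40;

  // Same shape as the retrieve() math: place the allocation at the end of
  // the cached region, then measure the unused gap left at the front.
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeadersSize;
  const uptr Diff = HeaderPos - CommitBase; // bytes of front fragmentation

  assert(AllocPos == 0x1b000);
  assert(Diff == 0xafc0);                   // ~44 KiB of waste for this entry
  return 0;
}
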
@@ -499,9 +496,8 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
+    for (CachedBlock &Entry : LRUEntries) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -514,78 +510,33 @@ class MapAllocatorCache {
 
 private:
   bool needToEvict() REQUIRES(Mutex) {
-    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+    return (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
   void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
-    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
-    // Cache should be populated with valid entries when not empty
-    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
-    u32 FreeIndex = AvailableHead;
-    AvailableHead = Entries[AvailableHead].Next;
-
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
-    EntriesCount++;
+    CachedBlock *FreeEntry = AvailEntries.front();
+    AvailEntries.pop_front();
 
-    // Availability stack should not have available entries when all entries
-    // are in use
-    if (EntriesCount == Config::getEntriesArraySize())
-      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+    *FreeEntry = Entry;
+    LRUEntries.push_front(FreeEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
-    else
-      Entries[Entries[I].Prev].Next = Entries[I].Next;
-
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
-    else
-      Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
-    Entries[I].Next = AvailableHead;
-    AvailableHead = static_cast<u16>(I);
-    EntriesCount--;
-
-    // Cache should not have valid entries when not empty
-    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
-    }
+  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+    DCHECK(Entry->isValid());
+    LRUEntries.remove(Entry);
+    Entry->invalidate();
+    AvailEntries.push_front(Entry);
   }
 
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
-      EntriesCount = 0;
+
+      for (CachedBlock &Entry : LRUEntries)
+        MapInfo[N++] = Entry.MemMap;
+      LRUEntries.clear();
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
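
Taken together, the new `insert()`/`remove()` pair keeps two disjoint lists over the same `Entries` array: a slot is popped off `AvailEntries`, filled, and pushed on the front of `LRUEntries`, while removal unlinks it, invalidates it, and returns it to the free list; eviction in `store()` always takes `LRUEntries.back()`. The same most-recent-first discipline, sketched with `std::list` standing in for the intrusive containers (purely illustrative, not scudo code):

#include <cassert>
#include <list>

// Illustrative LRU discipline: newest entries at the front, eviction from
// the back, with a fixed capacity like MaxEntriesCount.
struct Cached {
  int Id;
};

int main() {
  std::list<Cached> LRU;
  const std::size_t MaxEntries = 2;

  auto Insert = [&](int Id) {
    // Evict least-recently-used entries before inserting, as store() does.
    while (LRU.size() >= MaxEntries)
      LRU.pop_back();
    LRU.push_front({Id});
  };

  Insert(1);
  Insert(2);
  Insert(3);                   // evicts 1, the oldest entry
  assert(LRU.size() == 2);
  assert(LRU.front().Id == 3); // most recently stored
  assert(LRU.back().Id == 2);  // next eviction candidate
  return 0;
}
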
@@ -607,7 +558,7 @@ class MapAllocatorCache {
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +568,6 @@ class MapAllocatorCache {
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
@@ -630,12 +580,9 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
-  // The AvailableHead is the top of the stack of available entries
-  u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+  // The unused Entries
+  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
 };
 
 template <typename Config> class MapAllocator {