@@ -180,14 +180,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
-  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
-
-  // TODO: Refactor the intrusive list to support non-pointer link type
-  typedef struct {
-    u16 Head;
-    u16 Tail;
-  } ListInfo;
-
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -205,18 +197,13 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        CachedBlock &Entry = Entries[I];
-        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                    "BlockSize: %zu %s\n",
-                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-      }
-    };
-    printList(COMMITTED);
-    printList(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                  "BlockSize: %zu %s\n",
+                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+    }
   }
 
   // Ensure the default maximum specified fits the array.
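Both halves of the hunk above walk the same underlying structure: an intrusive doubly-linked list threaded through the fixed-size `Entries` array with `u16` indices, terminated by the `CachedBlock::InvalidEntry` sentinel. A minimal sketch of that traversal idiom in plain standard C++ (the `Node`, `kInvalid`, and `dumpList` names are illustrative, not from this file):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr uint16_t kInvalid = UINT16_MAX; // stand-in for CachedBlock::InvalidEntry

struct Node {
  uint16_t Next = kInvalid; // index links instead of pointers: 2 bytes each,
  uint16_t Prev = kInvalid; // and valid no matter where the array lives
  size_t CommitBase = 0;
  size_t CommitSize = 0;
};

// Walk head-to-tail the same way getStats() iterates the cache entries.
void dumpList(const Node *Entries, uint16_t Head) {
  for (uint16_t I = Head; I != kInvalid; I = Entries[I].Next)
    std::printf("0x%zx..0x%zx\n", Entries[I].CommitBase,
                Entries[I].CommitBase + Entries[I].CommitSize);
}
```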
@@ -240,10 +227,8 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -325,19 +310,15 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EntryListT EvictionListType;
-        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
-          EvictionListType = COMMITTED;
-        else
-          EvictionListType = DECOMMITTED;
-        remove(EntryLists[EvictionListType].Tail, EvictionListType);
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
 
-      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
+      insert(Entry);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0); // ScopedLock L(Mutex);
+    } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
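Note the idiom both versions preserve in this hunk: eviction is decided while `Mutex` is held, but the `unmap()` syscalls run only after the scoped lock is released, via a locally saved list of `MemMapT`s. A hedged sketch of the same defer-work-outside-the-lock pattern using standard C++ and POSIX `munmap()` rather than Scudo's types (`Region`, `trimCache`, and the field names are invented for illustration):

```cpp
#include <cstddef>
#include <mutex>
#include <sys/mman.h>
#include <vector>

struct Region {
  void *Base;
  size_t Size;
};

std::mutex CacheMutex;
std::vector<Region> CachedRegions; // guarded by CacheMutex

// Evict down to MaxEntries, deferring the expensive munmap() calls until
// after the critical section, as store() does with EvictionMemMaps.
void trimCache(size_t MaxEntries) {
  std::vector<Region> Victims;
  {
    std::lock_guard<std::mutex> L(CacheMutex);
    while (CachedRegions.size() > MaxEntries) {
      Victims.push_back(CachedRegions.back()); // save for unmap outside lock
      CachedRegions.pop_back();
    }
  }
  for (const Region &R : Victims)
    munmap(R.Base, R.Size); // syscall now runs without holding the lock
}
```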
@@ -354,69 +335,56 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
+    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
-    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
+      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      EntryListT OptimalFitListType = NONE;
-      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-             I = Entries[I].Next) {
-          const uptr CommitBase = Entries[I].CommitBase;
-          const uptr CommitSize = Entries[I].CommitSize;
-          const uptr AllocPos =
-              roundDown(CommitBase + CommitSize - Size, Alignment);
-          const uptr HeaderPos = AllocPos - HeadersSize;
-          if (HeaderPos > CommitBase + CommitSize)
-            continue;
-          if (HeaderPos < CommitBase ||
-              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
-            continue;
-
-          const uptr Diff = HeaderPos - CommitBase;
-          // immediately use a cached block if its size is close enough to
-          // the requested size.
-          const uptr MaxAllowedFragmentedBytes =
-              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-          if (Diff <= MaxAllowedFragmentedBytes) {
-            OptimalFitIndex = I;
-            EntryHeaderPos = HeaderPos;
-            OptimalFitListType = ListType;
-            return Entries[OptimalFitIndex];
-          }
-
-          // keep track of the smallest cached block
-          // that is greater than (AllocSize + HeaderSize)
-          if (Diff > MinDiff)
-            continue;
-          OptimalFitIndex = I;
-          MinDiff = Diff;
-          OptimalFitListType = ListType;
-          EntryHeaderPos = HeaderPos;
-        }
-        CachedBlock FoundEntry;
-        if (OptimalFitIndex != CachedBlock::InvalidEntry)
-          FoundEntry = Entries[OptimalFitIndex];
-        return FoundEntry;
-      };
-
-      // Prioritize valid fit from COMMITTED entries over
-      // optimal fit from DECOMMITTED entries
-      Entry = FindAvailableEntry(COMMITTED);
-      if (!Entry.isValid())
-        Entry = FindAvailableEntry(DECOMMITTED);
-
-      if (!Entry.isValid())
-        return false;
-
-      remove(OptimalFitIndex, OptimalFitListType);
-      SuccessfulRetrieves++;
-    } // ScopedLock L(Mutex);
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        const uptr CommitBase = Entries[I].CommitBase;
+        const uptr CommitSize = Entries[I].CommitSize;
+        const uptr AllocPos =
+            roundDown(CommitBase + CommitSize - Size, Alignment);
+        const uptr HeaderPos = AllocPos - HeadersSize;
+        if (HeaderPos > CommitBase + CommitSize)
+          continue;
+        if (HeaderPos < CommitBase ||
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+          continue;
+        }
+        Found = true;
+        const uptr Diff = HeaderPos - CommitBase;
+        // immediately use a cached block if its size is close enough to the
+        // requested size.
+        const uptr MaxAllowedFragmentedBytes =
+            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+        if (Diff <= MaxAllowedFragmentedBytes) {
+          OptimalFitIndex = I;
+          EntryHeaderPos = HeaderPos;
+          break;
+        }
+        // keep track of the smallest cached block
+        // that is greater than (AllocSize + HeaderSize)
+        if (Diff > MinDiff)
+          continue;
+        OptimalFitIndex = I;
+        MinDiff = Diff;
+        EntryHeaderPos = HeaderPos;
+      }
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        remove(OptimalFitIndex);
+        SuccessfulRetrieves++;
+      }
+    }
+    if (!Found)
+      return false;
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
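The restored loop above is a best-fit scan with an early exit: it remembers the candidate with the least wasted space (`Diff`), but takes a block immediately once the waste is at most a `1/FragmentedBytesDivisor` (10%) share of the usable range. A standalone sketch of that heuristic over a plain array of block sizes, as a simplification (the real code also validates alignment and header placement):

```cpp
#include <cstddef>
#include <cstdint>

// Return the index of the best-fitting block that can hold Size bytes,
// or -1 if none fits. Mirrors retrieve()'s 10% early-exit heuristic.
int findBestFit(const size_t *BlockSizes, int Count, size_t Size) {
  constexpr size_t FragmentedBytesDivisor = 10;
  int OptimalFitIndex = -1;
  size_t MinWaste = SIZE_MAX;
  for (int I = 0; I < Count; I++) {
    if (BlockSizes[I] < Size)
      continue; // block too small for this request
    const size_t Waste = BlockSizes[I] - Size;
    // Close enough: stop scanning immediately.
    if (Waste <= BlockSizes[I] / FragmentedBytesDivisor)
      return I;
    // Otherwise keep the tightest fit seen so far.
    if (Waste < MinWaste) {
      MinWaste = Waste;
      OptimalFitIndex = I;
    }
  }
  return OptimalFitIndex;
}
```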
@@ -480,15 +448,10 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
-    };
-    disableLists(COMMITTED);
-    disableLists(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
+    }
     QuarantinePos = -1U;
   }
 
@@ -503,7 +466,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -512,92 +475,71 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
     Entries[FreeIndex] = Entry;
-    pushFront(FreeIndex, ListType);
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
    EntriesCount++;
 
-    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
-      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
-                Entries[Entries[EntryLists[ListType].Head].Next].Time);
-    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  // Joins the entries adjacent to Entries[I], effectively
-  // unlinking Entries[I] from the list
-  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (I == EntryLists[ListType].Head)
-      EntryLists[ListType].Head = Entries[I].Next;
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == EntryLists[ListType].Tail)
-      EntryLists[ListType].Tail = Entries[I].Prev;
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
-  }
 
-  // Invalidates Entries[I], removes Entries[I] from list, and pushes
-  // Entries[I] onto the stack of available entries
-  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
     }
   }
 
-  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
-      EntryLists[ListType].Tail = static_cast<u16>(I);
-    else
-      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
-
-    Entries[I].Next = EntryLists[ListType].Head;
-    Entries[I].Prev = CachedBlock::InvalidEntry;
-    EntryLists[ListType].Head = static_cast<u16>(I);
-  }
-
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head;
-             I != CachedBlock::InvalidEntry;) {
-          uptr ToRemove = I;
-          I = Entries[I].Next;
-          MapInfo[N] = Entries[ToRemove].MemMap;
-          remove(ToRemove, ListType);
-          N++;
-        }
-      };
-      emptyList(COMMITTED);
-      emptyList(DECOMMITTED);
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+        if (!Entries[I].isValid())
+          continue;
+        MapInfo[N] = Entries[I].MemMap;
+        remove(I);
+        N++;
+      }
       EntriesCount = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
       MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
     }
-
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-      DCHECK(!Entries[I].isValid());
   }
 
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
@@ -619,13 +561,8 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
-        unlink(I, COMMITTED);
-        pushFront(I, DECOMMITTED);
-      }
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
-    }
   }
 
   HybridMutex Mutex;
@@ -642,12 +579,10 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // EntryLists stores the head and tail indices of all
-  // lists being used to store valid cache entries.
-  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
-  // COMMITTED entries are those that are not madvise()'d
-  // DECOMMITTED entries are those that are madvise()'d
-  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
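The restored `insert()`/`remove()` pair is the classic push-front and unlink on an index-linked LRU list, with freed slots recycled through `AvailableHead`, a singly-linked free stack that reuses the same `Next` field. A compact sketch of those two operations, under the same assumptions as the earlier `Node` example (illustrative names, not this file's API):

```cpp
#include <cstdint>

constexpr uint16_t kInvalid = UINT16_MAX;

struct Node {
  uint16_t Next = kInvalid;
  uint16_t Prev = kInvalid;
};

struct LRUList {
  uint16_t Head = kInvalid; // most recently used
  uint16_t Tail = kInvalid; // least recently used, evicted first
};

// Link slot I in front of the head, as insert() does for a fresh entry.
void pushFront(Node *Nodes, LRUList &L, uint16_t I) {
  if (L.Tail == kInvalid)
    L.Tail = I; // first element is both head and tail
  else
    Nodes[L.Head].Prev = I;
  Nodes[I].Next = L.Head;
  Nodes[I].Prev = kInvalid;
  L.Head = I;
}

// Unlink slot I and push it onto the free stack, as remove() does;
// Next doubles as the free-list link, so no extra storage is needed.
void removeAndRecycle(Node *Nodes, LRUList &L, uint16_t &FreeHead, uint16_t I) {
  if (I == L.Head)
    L.Head = Nodes[I].Next;
  else
    Nodes[Nodes[I].Prev].Next = Nodes[I].Next;
  if (I == L.Tail)
    L.Tail = Nodes[I].Prev;
  else
    Nodes[Nodes[I].Next].Prev = Nodes[I].Prev;
  Nodes[I].Next = FreeHead;
  FreeHead = I;
}
```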