@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -310,15 +325,19 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
@@ -335,56 +354,69 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
-      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
+      EntryListT OptimalFitListType = NONE;
+      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+             I = Entries[I].Next) {
+          const uptr CommitBase = Entries[I].CommitBase;
+          const uptr CommitSize = Entries[I].CommitSize;
+          const uptr AllocPos =
+              roundDown(CommitBase + CommitSize - Size, Alignment);
+          const uptr HeaderPos = AllocPos - HeadersSize;
+          if (HeaderPos > CommitBase + CommitSize)
+            continue;
+          if (HeaderPos < CommitBase ||
+              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            continue;
+
+          const uptr Diff = HeaderPos - CommitBase;
+          // immediately use a cached block if its size is close enough to
+          // the requested size.
+          const uptr MaxAllowedFragmentedBytes =
+              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+          if (Diff <= MaxAllowedFragmentedBytes) {
+            OptimalFitIndex = I;
+            EntryHeaderPos = HeaderPos;
+            OptimalFitListType = ListType;
+            return Entries[OptimalFitIndex];
+          }
+
+          // keep track of the smallest cached block
+          // that is greater than (AllocSize + HeaderSize)
+          if (Diff > MinDiff)
+            continue;
           OptimalFitIndex = I;
+          MinDiff = Diff;
+          OptimalFitListType = ListType;
           EntryHeaderPos = HeaderPos;
-          break;
         }
-        // keep track of the smallest cached block
-        // that is greater than (AllocSize + HeaderSize)
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
+        CachedBlock FoundEntry;
+        if (OptimalFitIndex != CachedBlock::InvalidEntry)
+          FoundEntry = Entries[OptimalFitIndex];
+        return FoundEntry;
+      };
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      Entry = FindAvailableEntry(COMMITTED);
+      if (!Entry.isValid())
+        Entry = FindAvailableEntry(DECOMMITTED);
+
+      if (!Entry.isValid())
+        return false;
+
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
@@ -448,10 +480,15 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
 
@@ -466,7 +503,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -475,71 +512,92 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;
 
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
 
+  // Invalidates Entries[I], removes Entries[I] from list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
    EntriesCount--;
 
     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }
 
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
       MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
     }
+
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+      DCHECK(!Entries[I].isValid());
   }
 
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
@@ -561,8 +619,13 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }
 
   HybridMutex Mutex;
@@ -579,10 +642,12 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries are those that are not madvise()'d
+  // DECOMMITTED entries are those that are madvise()'d
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
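The sketch below is not part of the patch; it is a minimal standalone illustration (hypothetical names, simplified fields, no locking or MemMap handling) of the bookkeeping the change introduces: per-list `ListInfo` head/tail indices plus `Prev`/`Next` links threaded through a fixed `Entries` array, and the COMMITTED to DECOMMITTED move that `releaseOlderThan()` performs when an entry's pages are released.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// All names below are illustrative stand-ins, not scudo definitions.
using u16 = std::uint16_t;
using u64 = std::uint64_t;
constexpr u16 InvalidEntry = 0xFFFF; // stand-in for CachedBlock::InvalidEntry
constexpr u16 EntriesArraySize = 8;  // stand-in for Config::getEntriesArraySize()

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1, NONE };

struct ListInfo {
  u16 Head = InvalidEntry;
  u16 Tail = InvalidEntry;
};

// A stripped-down cache entry: just the intrusive links and a timestamp.
struct Entry {
  u16 Prev = InvalidEntry;
  u16 Next = InvalidEntry;
  u64 Time = 0;
};

struct TwoListCache {
  Entry Entries[EntriesArraySize];
  ListInfo EntryLists[2];

  // Link index I in as the new head (most recently used end) of a list.
  void pushFront(u16 I, EntryListT List) {
    if (EntryLists[List].Tail == InvalidEntry)
      EntryLists[List].Tail = I;               // first element is also the tail
    else
      Entries[EntryLists[List].Head].Prev = I; // old head gains a predecessor
    Entries[I].Next = EntryLists[List].Head;
    Entries[I].Prev = InvalidEntry;
    EntryLists[List].Head = I;
  }

  // Join the neighbours of index I, removing it from the list.
  void unlink(u16 I, EntryListT List) {
    if (I == EntryLists[List].Head)
      EntryLists[List].Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == EntryLists[List].Tail)
      EntryLists[List].Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }
};

int main() {
  TwoListCache C;
  // Cache three entries; the newest insertion sits at the COMMITTED head.
  for (u16 I = 0; I < 3; I++) {
    C.Entries[I].Time = 100 + I;
    C.pushFront(I, COMMITTED);
  }
  // releaseOlderThan() analogue: entry 1 has its pages released, so it is
  // unlinked from COMMITTED and pushed onto DECOMMITTED.
  C.unlink(1, COMMITTED);
  C.Entries[1].Time = 0; // released entries are marked with Time == 0
  C.pushFront(1, DECOMMITTED);

  assert(C.EntryLists[COMMITTED].Head == 2 && C.EntryLists[COMMITTED].Tail == 0);
  assert(C.EntryLists[DECOMMITTED].Head == 1 && C.EntryLists[DECOMMITTED].Tail == 1);
  std::puts("two-list bookkeeping holds");
}
```

Using u16 indices rather than pointers keeps each link at two bytes and lets an entry move between lists without copying the cached block itself; the in-code TODO notes that the generic intrusive list could eventually support such non-pointer link types directly.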