@@ -247,6 +247,7 @@ class MapAllocatorCache {
     // The cache is initially empty
     LRUHead = CachedBlock::InvalidEntry;
     LRUTail = CachedBlock::InvalidEntry;
+    LastUnreleasedEntry = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
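
The list anchors here are plain `u16` indices into the fixed `Entries` array, with `CachedBlock::InvalidEntry` as the null sentinel, so the new `LastUnreleasedEntry` has to start invalid exactly like `LRUHead` and `LRUTail`. A minimal standalone sketch of this index-linked representation (simplified names, not the scudo types):

```cpp
#include <cstdint>

using u16 = std::uint16_t;

constexpr u16 kInvalidEntry = 0xFFFF; // stands in for CachedBlock::InvalidEntry

struct Node {
  u16 Next = kInvalidEntry;
  u16 Prev = kInvalidEntry;
};

struct IndexList {
  Node Nodes[32];
  u16 Head = kInvalidEntry;
  u16 Tail = kInvalidEntry;
  u16 LastUnreleased = kInvalidEntry; // must start invalid, like Head/Tail

  void init() {
    // An empty list is represented purely by sentinel indices; no node
    // is touched until the first insert.
    Head = Tail = LastUnreleased = kInvalidEntry;
  }
};
```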
@@ -321,9 +322,11 @@ class MapAllocatorCache {
       }
       CachedBlock PrevEntry = Quarantine[QuarantinePos];
       Quarantine[QuarantinePos] = Entry;
-      if (OldestTime == 0)
-        OldestTime = Entry.Time;
       Entry = PrevEntry;
+      // Update the entry time to reflect the time that the
+      // quarantined memory is placed in the Entries array
+      if (Entry.Time != 0)
+        Entry.Time = Time;
     }
 
     // All excess entries are evicted from the cache
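
The rotation now restamps the block that leaves quarantine: its `Time` should measure how long it has sat in the `Entries` array, not how long ago the user freed it, and blocks whose pages were already released (`Time == 0`) keep their zero stamp. A small sketch of that swap-and-restamp, with hypothetical stand-in types:

```cpp
#include <cstdint>

using u64 = std::uint64_t;

struct Block {
  u64 Time = 0; // 0 means "released"; nonzero is a free timestamp
};

// The incoming block displaces the oldest quarantined block; the displaced
// block is what actually enters the cache, so its age is measured from now.
void rotateQuarantine(Block &Entry, Block &Slot, u64 Now) {
  Block Prev = Slot;
  Slot = Entry;
  Entry = Prev;
  // Restamp only blocks that still hold committed pages (Time != 0);
  // already-released blocks keep Time == 0.
  if (Entry.Time != 0)
    Entry.Time = Now;
}
```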
@@ -334,16 +337,12 @@ class MapAllocatorCache {
       }
 
       insert(Entry);
-
-      if (OldestTime == 0)
-        OldestTime = Entry.Time;
     } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
 
     if (Interval >= 0) {
-      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     }
   }
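
With the `OldestTime` bookkeeping gone, `store()` now unconditionally asks `releaseOlderThan()` to decommit anything free for longer than the interval. Assuming `Time` is in nanoseconds and `Interval` in milliseconds, which is what the `* 1000000` scaling suggests, the cutoff works out as follows:

```cpp
#include <cstdint>

using u64 = std::uint64_t;
using s32 = std::int32_t;

// Cutoff is "now minus the release interval", converted to a common unit.
u64 releaseCutoff(u64 TimeNs, s32 IntervalMs) {
  return TimeNs - static_cast<u64>(IntervalMs) * 1000000; // ms -> ns
}
// e.g. Interval = 1000 ms gives a cutoff one second in the past, so
// releaseOlderThan() decommits every cached block that has been free
// for at least one second.
```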
@@ -523,22 +522,37 @@ class MapAllocatorCache {
     // Cache should be populated with valid entries when not empty
     DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
 
-    u32 FreeIndex = AvailableHead;
+    u16 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
+    Entries[FreeIndex] = Entry;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
+    // Check list order
+    if (EntriesCount > 1)
+      DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+
+    // Released entry goes after LastUnreleasedEntry rather than at LRUHead
+    if (Entry.Time == 0 && LastUnreleasedEntry != CachedBlock::InvalidEntry) {
+      Entries[FreeIndex].Next = Entries[LastUnreleasedEntry].Next;
+      Entries[FreeIndex].Prev = LastUnreleasedEntry;
+      Entries[LastUnreleasedEntry].Next = FreeIndex;
+      if (LRUTail == LastUnreleasedEntry) {
+        LRUTail = FreeIndex;
+      } else {
+        Entries[Entries[FreeIndex].Next].Prev = FreeIndex;
+      }
     } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+      Entries[FreeIndex].Next = LRUHead;
+      Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+      if (EntriesCount == 0) {
+        LRUTail = FreeIndex;
+      } else {
+        Entries[LRUHead].Prev = FreeIndex;
+      }
+      LRUHead = FreeIndex;
+      if (LastUnreleasedEntry == CachedBlock::InvalidEntry)
+        LastUnreleasedEntry = FreeIndex;
     }
 
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
     EntriesCount++;
 
     // Availability stack should not have available entries when all entries
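
This is the heart of the change: entries arriving with `Time == 0` are already released, so instead of pushing them at `LRUHead` they are spliced in directly after `LastUnreleasedEntry`, keeping the list partitioned into an unreleased, newest-first prefix followed by a released suffix. A self-contained model of both insert paths (simplified names and a tiny array, not the scudo code):

```cpp
#include <cassert>
#include <cstdint>

using u16 = std::uint16_t;
using u64 = std::uint64_t;

constexpr u16 kInvalid = 0xFFFF;

struct Node {
  u64 Time = 0;
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
};

struct Cache {
  Node Nodes[8];
  u16 Head = kInvalid, Tail = kInvalid, LastUnreleased = kInvalid;
  u16 Count = 0;

  void insertAt(u16 I, u64 Time) {
    Nodes[I].Time = Time;
    if (Time == 0 && LastUnreleased != kInvalid) {
      // Released entry: splice in right after the unreleased boundary so
      // the released suffix of the list stays contiguous.
      Nodes[I].Next = Nodes[LastUnreleased].Next;
      Nodes[I].Prev = LastUnreleased;
      Nodes[LastUnreleased].Next = I;
      if (Tail == LastUnreleased)
        Tail = I;
      else
        Nodes[Nodes[I].Next].Prev = I;
    } else {
      // Unreleased entry, or no unreleased boundary yet: push at the head.
      Nodes[I].Next = Head;
      Nodes[I].Prev = kInvalid;
      if (Count == 0)
        Tail = I;
      else
        Nodes[Head].Prev = I;
      Head = I;
      if (LastUnreleased == kInvalid)
        LastUnreleased = I;
    }
    Count++;
  }
};

int main() {
  Cache C;
  C.insertAt(0, /*Time=*/100); // head, also becomes LastUnreleased
  C.insertAt(1, /*Time=*/200); // newer entry pushed at head
  C.insertAt(2, /*Time=*/0);   // released: goes after node 0, not at head
  assert(C.Head == 1 && C.Nodes[1].Next == 0);
  assert(C.LastUnreleased == 0 && C.Nodes[0].Next == 2);
  assert(C.Tail == 2);
  return 0;
}
```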
@@ -552,6 +566,9 @@ class MapAllocatorCache {
     Entries[I].invalidate();
 
+    if (I == LastUnreleasedEntry)
+      LastUnreleasedEntry = Entries[LastUnreleasedEntry].Prev;
+
     if (I == LRUHead)
       LRUHead = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
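
Removal has to repair the boundary before unlinking: if the removed entry is `LastUnreleasedEntry` itself, the boundary retreats to `Prev`, the next-newer unreleased entry, or to the invalid sentinel when none is left. Since the hunk only shows the head-side relinking, here is a hedged sketch of the full unlink, using the simplified names from the earlier sketches:

```cpp
#include <cstdint>

using u16 = std::uint16_t;
constexpr u16 kInvalid = 0xFFFF;

struct Node {
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
};

// Unlink node I, repairing the unreleased boundary first: if I is the
// boundary, it moves to I's Prev (the next-newer unreleased entry), or to
// the sentinel when I was the only unreleased entry left.
void unlink(Node *Nodes, u16 &Head, u16 &Tail, u16 &LastUnreleased, u16 I) {
  if (I == LastUnreleased)
    LastUnreleased = Nodes[I].Prev;

  if (I == Head)
    Head = Nodes[I].Next;
  else
    Nodes[Nodes[I].Prev].Next = Nodes[I].Next;

  if (I == Tail)
    Tail = Nodes[I].Prev;
  else
    Nodes[Nodes[I].Next].Prev = Nodes[I].Prev;
}
```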
@@ -593,35 +610,39 @@ class MapAllocatorCache {
     }
   }
 
-  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
-    if (!Entry.isValid() || !Entry.Time)
-      return;
-    if (Entry.Time > Time) {
-      if (OldestTime == 0 || Entry.Time < OldestTime)
-        OldestTime = Entry.Time;
-      return;
-    }
+  inline void release(CachedBlock &Entry) {
+    DCHECK(Entry.Time != 0);
     Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
     Entry.Time = 0;
   }
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!EntriesCount)
       return;
-    OldestTime = 0;
-    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
-      releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-      releaseIfOlderThan(Entries[I], Time);
-  }
+
+    // TODO: Add conditional to skip iteration over quarantine
+    // if quarantine is disabled
+    for (uptr I = 0; I < Config::getQuarantineSize(); I++) {
+      CachedBlock &ReleaseEntry = Quarantine[I];
+      if (!ReleaseEntry.isValid() || !ReleaseEntry.Time ||
+          ReleaseEntry.Time > Time)
+        continue;
+      release(ReleaseEntry);
+    }
+
+    // Release oldest entries first by releasing from decommit base
+    while (LastUnreleasedEntry != CachedBlock::InvalidEntry &&
+           Entries[LastUnreleasedEntry].Time <= Time) {
+      release(Entries[LastUnreleasedEntry]);
+      LastUnreleasedEntry = Entries[LastUnreleasedEntry].Prev;
+    }
+  }
 
   HybridMutex Mutex;
   u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
-  u64 OldestTime GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
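
`releaseOlderThan()` no longer scans the whole `Entries` array; it walks from `LastUnreleasedEntry` toward the head via `Prev`. Because the unreleased prefix is ordered newest-first, the boundary is the oldest unreleased entry, and the walk can stop at the first entry younger than the cutoff. A standalone sketch of that walk (a stub stands in for `MemMap.releaseAndZeroPagesToOS()`):

```cpp
#include <cstdint>

using u16 = std::uint16_t;
using u64 = std::uint64_t;
constexpr u16 kInvalid = 0xFFFF;

struct Node {
  u64 Time = 0;
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
};

// Stub for MemMap.releaseAndZeroPagesToOS(); here it only clears the stamp.
void releaseStub(Node &N) { N.Time = 0; }

// Walk from the boundary toward the head, oldest unreleased entry first,
// stopping at the first entry newer than the cutoff.
void releaseOlderThan(Node *Nodes, u16 &LastUnreleased, u64 Cutoff) {
  while (LastUnreleased != kInvalid && Nodes[LastUnreleased].Time <= Cutoff) {
    releaseStub(Nodes[LastUnreleased]);
    LastUnreleased = Nodes[LastUnreleased].Prev;
  }
}
```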
@@ -636,6 +657,9 @@ class MapAllocatorCache {
   u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  // The LastUnreleasedEntry is the least recently used entry that has not
+  // been released
+  u16 LastUnreleasedEntry GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {
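
Taken together, the new field maintains one invariant: the list is an unreleased, newest-first prefix followed by a released suffix, with `LastUnreleasedEntry` pointing at the last entry of the prefix. A debug-style check of that partition, under the same simplified model as the sketches above:

```cpp
#include <cassert>
#include <cstdint>

using u16 = std::uint16_t;
using u64 = std::uint64_t;
constexpr u16 kInvalid = 0xFFFF;

struct Node {
  u64 Time = 0;
  u16 Next = kInvalid;
  u16 Prev = kInvalid;
};

// Walk the list and assert the partition: a newest-first run of unreleased
// entries (Time != 0, non-increasing), then only released entries
// (Time == 0) through the tail, with the boundary at the last unreleased one.
void checkPartition(const Node *Nodes, u16 Head, u16 LastUnreleased) {
  bool InReleasedSuffix = false;
  u64 PrevTime = ~0ULL;
  for (u16 I = Head; I != kInvalid; I = Nodes[I].Next) {
    if (Nodes[I].Time != 0) {
      assert(!InReleasedSuffix);         // no unreleased entry after a released one
      assert(Nodes[I].Time <= PrevTime); // newest-first ordering
      PrevTime = Nodes[I].Time;
      if (Nodes[I].Next == kInvalid || Nodes[Nodes[I].Next].Time == 0)
        assert(I == LastUnreleased);     // boundary is the oldest unreleased entry
    } else {
      InReleasedSuffix = true;
    }
  }
}
```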