Commit 95ea37c

[scudo] Added LRU eviction policy to secondary cache. (#99409)
The logic for emptying the cache now follows an LRU eviction policy: when the cache is full on any given free operation, the oldest entry in the cache is evicted and the memory associated with it is unmapped. Finding an empty cache entry is now a constant-time operation, using a stack of available cache entries. Through the LRU structure, the cache retrieval algorithm now iterates over only the valid entries of the cache. Furthermore, the retrieval algorithm first searches cache entries that have not been decommitted (i.e., madvise() has not been called on their corresponding memory chunks) to reduce the likelihood of returning a memory chunk that would induce a page fault.
1 parent 34e67ff commit 95ea37c
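
At its core, the change threads two intrusive lists through the fixed Entries array using u16 indices: a doubly-linked LRU list of valid entries (LRUHead/LRUTail) and a singly-linked stack of available slots (AvailableHead), making both insertion and eviction O(1). Below is a minimal, self-contained sketch of that structure, not the committed code; the names (Entry, Cache, kArraySize, kInvalid) are illustrative stand-ins for scudo's CachedBlock, MapAllocatorCache, Config::getEntriesArraySize(), and CachedBlock::InvalidEntry.

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint16_t kInvalid = UINT16_MAX; // sentinel, like CachedBlock::InvalidEntry
constexpr uint16_t kArraySize = 4;        // stand-in for Config::getEntriesArraySize()

struct Entry {
  uint64_t Time = 0; // stand-in for the cached block's payload
  bool Valid = false;
  uint16_t Next = 0; // LRU link when valid, free-stack link when not
  uint16_t Prev = 0;
};

struct Cache {
  Entry Entries[kArraySize];
  uint16_t LRUHead = kInvalid; // most recently used entry
  uint16_t LRUTail = kInvalid; // least recently used entry, evicted first
  uint16_t AvailableHead = 0;  // top of the stack of free slots
  uint16_t Count = 0;

  Cache() {
    // Initially every slot sits on the availability stack, in array order.
    for (uint16_t I = 0; I + 1 < kArraySize; I++)
      Entries[I].Next = static_cast<uint16_t>(I + 1);
    Entries[kArraySize - 1].Next = kInvalid;
  }

  // O(1): pop a free slot off the stack, splice it in at the LRU head.
  void insert(uint64_t Time) {
    assert(AvailableHead != kInvalid && "caller must evict before inserting");
    uint16_t Free = AvailableHead;
    AvailableHead = Entries[Free].Next;
    if (Count == 0)
      LRUTail = Free;
    else
      Entries[LRUHead].Prev = Free;
    Entries[Free].Time = Time;
    Entries[Free].Valid = true;
    Entries[Free].Next = LRUHead;
    Entries[Free].Prev = kInvalid;
    LRUHead = Free;
    Count++;
  }

  // O(1): unlink slot I from the LRU list, push it back on the free stack.
  void remove(uint16_t I) {
    assert(Entries[I].Valid);
    Entries[I].Valid = false;
    if (I == LRUHead)
      LRUHead = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == LRUTail)
      LRUTail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
    Entries[I].Next = AvailableHead;
    AvailableHead = I;
    Count--;
  }
};

int main() {
  Cache C;
  for (uint64_t T = 1; T <= kArraySize; T++)
    C.insert(T);       // fill the cache; slot lookup is O(1) each time
  C.remove(C.LRUTail); // LRU eviction: drops the oldest entry (Time == 1)
  for (uint16_t I = C.LRUHead; I != kInvalid; I = C.Entries[I].Next)
    std::printf("Time=%llu\n", (unsigned long long)C.Entries[I].Time);
  return 0;
}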

File tree

1 file changed: +141 −46 lines changed


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 141 additions & 46 deletions
@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"
 
 namespace scudo {
 
@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }
 
 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;
 
   bool isValid() { return CommitBase != 0; }
 
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                 "BlockSize: %zu %s\n",
                 Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                 Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,33 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);
 
-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -243,17 +265,32 @@ template <typename Config> class MapAllocatorCache {
         Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                          MAP_NOACCESS);
       }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
     }
+
+    // Usually only one entry will be evicted from the cache.
+    // Only in the rare event that the cache shrinks in real-time
+    // due to a decrease in the configurable value MaxEntriesCount
+    // will more than one cache entry be evicted.
+    // The vector is used to save the MemMaps of evicted entries so
+    // that the unmap call can be performed outside the lock
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     do {
       ScopedLock L(Mutex);
+
+      // Time must be computed under the lock to ensure
+      // that the LRU cache remains sorted with respect to
+      // time in a multithreaded environment
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +306,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (needToEvict()) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +346,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +380,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -418,11 +450,9 @@ template <typename Config> class MapAllocatorCache {
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +464,66 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
+  bool needToEvict() REQUIRES(Mutex) {
+    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+  }
+
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // The cache is not full here, so a free slot must be on the stack
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when empty
+    if (EntriesCount == 0) {
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
@@ -443,11 +533,10 @@ template <typename Config> class MapAllocatorCache {
         if (!Entries[I].isValid())
           continue;
         MapInfo[N] = Entries[I].MemMap;
-        Entries[I].invalidate();
+        remove(I);
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +573,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // The AvailableHead is the top of the stack of available entries
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {
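
A note on the store() path above: while the mutex is held, evicted entries only have their MemMaps collected into EvictionMemMaps; the actual unmap happens after the lock is dropped, so other threads are not serialized behind unmap system calls. Below is a minimal sketch of that pattern, assuming illustrative stand-ins (MemMap, storeEvicting) rather than scudo's API:

#include <cstdio>
#include <mutex>
#include <vector>

struct MemMap { // illustrative stand-in for scudo's MemMapT
  void unmap() { std::puts("unmapped outside the lock"); }
};

std::mutex Mutex;
std::vector<MemMap> Cache; // guarded by Mutex

void storeEvicting(MemMap NewMap, unsigned MaxEntries) {
  std::vector<MemMap> Evicted; // usually holds at most one entry
  {
    std::lock_guard<std::mutex> L(Mutex);
    // Evict all excess entries, but only record their mappings here.
    while (Cache.size() >= MaxEntries) {
      Evicted.push_back(Cache.back());
      Cache.pop_back();
    }
    Cache.push_back(NewMap);
  } // lock released
  for (MemMap &M : Evicted) // the expensive system calls happen lock-free
    M.unmap();
}

int main() {
  for (int I = 0; I < 3; I++)
    storeEvicting(MemMap{}, /*MaxEntries=*/2);
  return 0;
}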
