
Commit 47b46e0

[scudo] Separated committed and decommitted entries.
Initially, the LRU list stored all mapped entries with no distinction between committed (non-madvise()'d) entries and decommitted (madvise()'d) entries. These two types of entries are now kept in separate lists, allowing future cache logic to branch depending on whether an entry is committed or decommitted. Furthermore, the retrieval algorithm prioritizes committed entries over decommitted entries. Specifically, committed entries that satisfy the MaxUnusedCachePages requirement are retrieved before optimal-fit decommitted entries.
1 parent d851b5c commit 47b46e0
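To illustrate the idea, here is a simplified, hypothetical sketch (not the scudo implementation; the names Block and SimpleTwoListCache are invented for illustration) of a cache that keeps committed and decommitted blocks on separate most-recently-used lists and searches the committed list first on retrieval:

#include <cstddef>
#include <cstdint>
#include <list>
#include <optional>

// A cached mapping; Committed is false once its pages were madvise()'d away.
struct Block {
  uintptr_t Base = 0;
  size_t Size = 0;
  bool Committed = true;
};

class SimpleTwoListCache {
public:
  // Most-recently-stored blocks sit at the front of their list.
  void store(const Block &B) {
    (B.Committed ? Committed : Decommitted).push_front(B);
  }

  // Search the committed list first; fall back to the decommitted list.
  std::optional<Block> retrieve(size_t Size) {
    for (std::list<Block> *List : {&Committed, &Decommitted}) {
      for (auto It = List->begin(); It != List->end(); ++It) {
        if (It->Size >= Size) {
          Block Found = *It;
          List->erase(It);
          return Found;
        }
      }
    }
    return std::nullopt;
  }

private:
  std::list<Block> Committed;   // pages still committed
  std::list<Block> Decommitted; // pages already released to the OS
};

In the actual commit the lists are intrusive (indices into the Entries array, tracked via EntryLists[COMMITTED] and EntryLists[DECOMMITTED]), and the committed-list search additionally enforces the MaxUnusedCachePages bound before falling back to an optimal-fit decommitted entry.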

File tree: 1 file changed (+144, −76 lines)


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 144 additions & 76 deletions
@@ -184,6 +184,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
 class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -201,13 +209,18 @@ class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }

   // Ensure the default maximum specified fits the array.
@@ -231,8 +244,10 @@ class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;

     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -250,7 +265,6 @@ class MapAllocatorCache {
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
     Entry.CommitBase = CommitBase;
     Entry.CommitSize = CommitSize;
     Entry.BlockBegin = BlockBegin;
@@ -312,18 +326,27 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }

-      // All excess entries are evicted from the cache
+      // All excess entries are evicted from the cache.
+      // DECOMMITTED entries, being older than the COMMITTED
+      // entries, are evicted first in least recently used (LRU)
+      // fashion, followed by the COMMITTED entries
       while (needToEvict()) {
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }

-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);

       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);

     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
@@ -340,17 +363,14 @@ class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
+    uptr MinDiff = UINTPTR_MAX;
+    EntryListT OptimalFitListType = NONE;
     EntryHeaderPos = 0;
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return {};
-      u32 OptimalFitIndex = 0;
-      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+
+    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
@@ -360,34 +380,48 @@ class MapAllocatorCache {
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
           continue;
-        }
-        Found = true;
+
         const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
+        // immediately use a cached block if its size is close enough to
+        // the requested size.
         const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
         if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
           EntryHeaderPos = HeaderPos;
-          break;
+          OptimalFitListType = ListType;
+          return true;
         }
+
         // keep track of the smallest cached block
         // that is greater than (AllocSize + HeaderSize)
         if (Diff > MinDiff)
           continue;
         OptimalFitIndex = I;
         MinDiff = Diff;
+        OptimalFitListType = ListType;
         EntryHeaderPos = HeaderPos;
       }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
+      return (OptimalFitIndex != CachedBlock::InvalidEntry);
+    };
+
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return {};
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return {};
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);

     return Entry;
   }
@@ -432,10 +466,15 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }

@@ -450,7 +489,7 @@ class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }

-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

     // Cache should be populated with valid entries when not empty
@@ -459,66 +498,86 @@ class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;

-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;

+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }

-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;

-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
+
+  // Invalidates Entries[I], removes Entries[I] from list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();

+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;

     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }

+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -545,8 +604,14 @@ class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
+         I = Entries[I].Next) {
+      if (Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }

   HybridMutex Mutex;
@@ -563,10 +628,12 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};

-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the OS
+  // DECOMMITTED entries have memory chunks that have been released to the OS
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
@@ -706,6 +773,7 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   }
   return Ptr;
 }
+
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
