
Commit 81cfb34

thurstond authored and copybara-github committed
[NFCI][scudo] Remove unused variable 'MaxCount' (#100201)
Fixes "error: unused variable 'MaxCount' [-Werror,-Wunused-variable]", which is no longer used after llvm/llvm-project#99409 GitOrigin-RevId: ce811fb6d94e1d4af1fd1f52fbf109bc34834970 Change-Id: I8b7cafb6d7be6f79a94e233fcefa132623dbb421
1 parent 8a6eaa2 commit 81cfb34
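For context, the class of failure being fixed can be reproduced standalone (an illustrative sketch only; the atomic counter and function below are stand-ins, not the scudo sources). Once the last consumer of the loaded value is deleted, the leftover local trips -Wunused-variable, and -Werror promotes the warning to a build-breaking error; removing the declaration, as this commit does, is the fix:

    // repro.cpp -- deliberately fails: clang++ -c -Wunused-variable -Werror repro.cpp
    #include <atomic>

    std::atomic<unsigned> MaxEntriesCount{16};

    void store() {
      // The eviction logic that consumed this value has moved elsewhere, so
      // the result of the load is never read again:
      //   error: unused variable 'MaxCount' [-Werror,-Wunused-variable]
      const unsigned MaxCount =
          MaxEntriesCount.load(std::memory_order_relaxed);
    }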

File tree: 1 file changed (+141 −48 lines)

standalone/secondary.h

Lines changed: 141 additions & 48 deletions
@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"

 namespace scudo {

@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }

 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;

   bool isValid() { return CommitBase != 0; }

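The new Next and Prev fields thread an intrusive doubly linked list through the fixed Entries array, using u16 indices instead of pointers; CacheIndexMax (UINT16_MAX) doubles as the InvalidEntry sentinel, which is why a later hunk adds a static_assert capping the array size. A minimal sketch of the idea, with illustrative names rather than the scudo types:

    #include <cstdint>
    #include <cstdio>

    constexpr uint16_t kInvalid = UINT16_MAX; // plays the role of InvalidEntry

    // Links are 2-byte indices into a fixed array: no heap allocation, and the
    // links stay valid if the whole array is copied or relocated.
    struct Node {
      uint64_t Payload = 0;
      uint16_t Next = kInvalid;
      uint16_t Prev = kInvalid;
    };

    int main() {
      Node Nodes[4];
      // Hand-thread 0 <-> 1 <-> 2 into a list; slot 3 stays unlinked.
      for (uint16_t I = 0; I < 3; I++) {
        Nodes[I].Payload = I * 100u;
        Nodes[I].Next = (I == 2) ? kInvalid : static_cast<uint16_t>(I + 1);
        Nodes[I].Prev = (I == 0) ? kInvalid : static_cast<uint16_t>(I - 1);
      }
      // Walk head to tail, exactly like the cache's LRU traversals below.
      for (uint16_t I = 0; I != kInvalid; I = Nodes[I].Next)
        printf("node %u: payload %llu\n", I,
               static_cast<unsigned long long>(Nodes[I].Payload));
      return 0;
    }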
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
     static_assert(Config::getDefaultMaxEntriesCount() <=
                       Config::getEntriesArraySize(),
                   "");
+    // Ensure the cache entry array size fits in the LRU list Next and Prev
+    // index fields
+    static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                  "Cache entry array is too large to be indexed.");

   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,33 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }

   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);

-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
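init() now also seeds the availability stack: each slot's Next points at its successor, the last slot is terminated with InvalidEntry, and AvailableHead starts at index 0. The same free-list scheme in isolation, under the illustrative assumptions used above:

    #include <cstdint>
    #include <cstdio>

    constexpr uint16_t kInvalid = UINT16_MAX;
    constexpr uint32_t kArraySize = 8; // stand-in for Config::getEntriesArraySize()

    struct Slot {
      uint16_t Next = kInvalid;
    };

    struct FreeList {
      Slot Slots[kArraySize];
      uint16_t AvailableHead = kInvalid;

      void init() {
        // Chain slot I to slot I + 1 so pops hand slots out front to back.
        for (uint32_t I = 0; I < kArraySize - 1; I++)
          Slots[I].Next = static_cast<uint16_t>(I + 1);
        Slots[kArraySize - 1].Next = kInvalid;
        AvailableHead = 0;
      }

      // Pop a free slot index, or kInvalid once every slot is in use.
      uint16_t pop() {
        const uint16_t I = AvailableHead;
        if (I != kInvalid)
          AvailableHead = Slots[I].Next;
        return I;
      }
    };

    int main() {
      FreeList FL;
      FL.init();
      for (uint16_t I = FL.pop(); I != kInvalid; I = FL.pop())
        printf("got free slot %u\n", I);
      return 0;
    }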
@@ -243,17 +265,32 @@ template <typename Config> class MapAllocatorCache {
         Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                          MAP_NOACCESS);
       }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
     }
+
+    // Usually only one entry will be evicted from the cache.
+    // Only in the rare event that the cache shrinks in real-time
+    // due to a decrease in the configurable value MaxEntriesCount
+    // will more than one cache entry be evicted.
+    // The vector is used to save the MemMaps of evicted entries so
+    // that the unmap call can be performed outside the lock
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     do {
       ScopedLock L(Mutex);
+
+      // Time must be computed under the lock to ensure
+      // that the LRU cache remains sorted with respect to
+      // time in a multithreaded environment
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,36 +306,32 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (needToEvict()) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }

   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                 LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
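Note the shape of the new store() path: all list bookkeeping, including collecting the MemMaps of evicted entries into EvictionMemMaps, happens under Mutex, while the expensive unmap syscalls are issued only after the critical section ends. A sketch of that defer-work-outside-the-lock pattern in portable C++ (std::mutex, std::vector, and a stub MemMap standing in for scudo's primitives):

    #include <cstdio>
    #include <mutex>
    #include <vector>

    struct MemMap {
      // Stub mapping handle; a real implementation would call munmap() here.
      void unmapWhole() { printf("unmap performed outside the lock\n"); }
    };

    std::mutex CacheMutex;
    std::vector<MemMap> Cache; // guarded by CacheMutex; newest at the front

    void storeAndEvict(MemMap NewEntry, size_t MaxEntries) {
      std::vector<MemMap> Evicted; // usually at most one element
      {
        std::lock_guard<std::mutex> L(CacheMutex);
        // Under the lock: bookkeeping only. Evict from the cold (back) end
        // until the new entry fits.
        while (Cache.size() >= MaxEntries) {
          Evicted.push_back(Cache.back());
          Cache.pop_back();
        }
        Cache.insert(Cache.begin(), NewEntry);
      }
      // Outside the lock: the slow part, invisible to other cache users.
      for (MemMap &M : Evicted)
        M.unmapWhole();
    }

    int main() {
      for (int I = 0; I < 4; I++)
        storeAndEvict(MemMap{}, /*MaxEntries=*/2);
      return 0;
    }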
@@ -312,9 +345,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +379,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
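retrieve() now walks the LRU list instead of scanning the whole array, tracking the entry that wastes the least space; the surrounding (unchanged) code also stops early once the waste falls within the 10% FragmentedBytesDivisor bound. A simplified version of that best-fit-with-early-exit loop, with hypothetical names:

    #include <cstdint>

    constexpr uint16_t kInvalid = UINT16_MAX;

    struct Block {
      uintptr_t Size = 0;
      uint16_t Next = kInvalid;
    };

    // Walk the list from Head and return the index of the block wasting the
    // least space for Request, or kInvalid if nothing fits. Stop early once
    // the waste is within 10% of the request (the FragmentedBytesDivisor
    // heuristic); since the list is MRU-first, early hits are also the hottest.
    uint16_t bestFit(const Block *Blocks, uint16_t Head, uintptr_t Request) {
      const uintptr_t MaxAcceptableWaste = Request / 10;
      uintptr_t MinWaste = UINTPTR_MAX;
      uint16_t Best = kInvalid;
      for (uint16_t I = Head; I != kInvalid; I = Blocks[I].Next) {
        if (Blocks[I].Size < Request)
          continue; // cannot satisfy the request
        const uintptr_t Waste = Blocks[I].Size - Request;
        if (Waste < MinWaste) {
          MinWaste = Waste;
          Best = I;
          if (Waste <= MaxAcceptableWaste)
            break; // good enough
        }
      }
      return Best;
    }

    int main() {
      Block B[3] = {{4096, 1}, {8192, 2}, {4200, kInvalid}};
      return bestFit(B, 0, 4000) == 0 ? 0 : 1; // waste 96 <= 400, early exit
    }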
@@ -417,12 +448,9 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +462,66 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }

 private:
+  bool needToEvict() REQUIRES(Mutex) {
+    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+  }
+
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should have no valid entries when empty
+    if (EntriesCount == 0) {
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
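insert() pops a slot off the availability stack and links it in at the list head; remove() is the mirror image, unlinking an entry from the head, tail, or middle and pushing its slot back onto the stack, all in O(1). A condensed sketch of the unlink-and-recycle step (illustrative; it mirrors rather than reproduces the scudo code):

    #include <cstdint>

    constexpr uint16_t kInvalid = UINT16_MAX;

    struct Entry {
      uint16_t Next = kInvalid;
      uint16_t Prev = kInvalid;
    };

    struct LRUList {
      Entry Entries[8];
      uint16_t Head = kInvalid;
      uint16_t Tail = kInvalid;
      uint16_t AvailableHead = kInvalid;
      uint32_t Count = 0;

      void unlink(uint16_t I) {
        // Detach I, patching whichever neighbor (or list end) it touches.
        if (I == Head)
          Head = Entries[I].Next;
        else
          Entries[Entries[I].Prev].Next = Entries[I].Next;

        if (I == Tail)
          Tail = Entries[I].Prev;
        else
          Entries[Entries[I].Next].Prev = Entries[I].Prev;

        // Recycle the slot by pushing it onto the availability stack.
        Entries[I].Next = AvailableHead;
        AvailableHead = I;
        Count--;
      }
    };

    int main() {
      LRUList L;
      // Hand-build a two-entry list: Head = 0 <-> 1 = Tail.
      L.Entries[0].Next = 1;
      L.Entries[1].Prev = 0;
      L.Head = 0;
      L.Tail = 1;
      L.Count = 2;
      L.unlink(1); // evict the least recently used entry
      return (L.Head == 0 && L.Tail == 0) ? 0 : 1;
    }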
@@ -443,11 +531,10 @@ template <typename Config> class MapAllocatorCache {
         if (!Entries[I].isValid())
           continue;
         MapInfo[N] = Entries[I].MemMap;
-        Entries[I].invalidate();
+        remove(I);
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +571,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // The AvailableHead is the top of the stack of available entries
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };

 template <typename Config> class MapAllocator {
