#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"
+#include "vector.h"

namespace scudo {

@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
}

namespace {
+
struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;

  bool isValid() { return CommitBase != 0; }
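The hunk above turns the cache entries into nodes of a doubly linked LRU list: each CachedBlock carries 16-bit Next/Prev array indices instead of pointers, and CacheIndexMax doubles as the null sentinel (InvalidEntry). A minimal, self-contained sketch of that index-based linkage, with hypothetical Node/IndexList/kInvalid names that are not part of the patch:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for CachedBlock's linkage fields; not Scudo code.
constexpr uint16_t kInvalid = UINT16_MAX; // plays the role of InvalidEntry

struct Node {
  uint64_t Payload = 0;
  uint16_t Next = kInvalid; // index of the next node, or kInvalid
  uint16_t Prev = kInvalid; // index of the previous node, or kInvalid
};

struct IndexList {
  Node Nodes[8] = {};
  uint16_t Head = kInvalid; // front of the list (most recently used)

  // Link the node stored at array index I in front of the current head.
  void pushFront(uint16_t I) {
    Nodes[I].Prev = kInvalid;
    Nodes[I].Next = Head;
    if (Head != kInvalid)
      Nodes[Head].Prev = I;
    Head = I;
  }
};

int main() {
  IndexList L;
  L.Nodes[3].Payload = 30;
  L.Nodes[5].Payload = 50;
  L.pushFront(3);
  L.pushFront(5); // index 5 is now the most recently used node
  for (uint16_t I = L.Head; I != kInvalid; I = L.Nodes[I].Next)
    std::printf("index %u -> payload %llu\n", static_cast<unsigned>(I),
                static_cast<unsigned long long>(L.Nodes[I].Payload));
  return 0;
}
```

Two-byte indices keep the added per-entry cost at four bytes, with the later static_assert (array size must fit in CacheIndexMax) as the only bound that has to hold.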
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "Cache entry array is too large to be indexed.");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,33 @@ template <typename Config> class MapAllocatorCache {
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

-    bool EntryCached = false;
-    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
    CachedBlock Entry;
+
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
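init() above also pre-threads every slot of the Entries array into a singly linked availability stack: AvailableHead points at slot 0 and each slot's Next names the following free slot, so insert() can pop a free index in O(1) and remove() can push one back. A rough illustration of that threading under hypothetical names (Slot, kInvalid, kSize), not taken from the patch:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint16_t kInvalid = UINT16_MAX; // stand-in for CachedBlock::InvalidEntry

// Only the Next link matters for the availability stack.
struct Slot {
  uint16_t Next = 0;
};

int main() {
  constexpr uint32_t kSize = 4; // plays the role of Config::getEntriesArraySize()
  Slot Slots[kSize];

  // Thread every slot into one chain, the way init() does:
  // 0 -> 1 -> 2 -> 3 -> kInvalid, with the stack top at slot 0.
  uint16_t AvailableHead = 0;
  for (uint32_t I = 0; I < kSize - 1; I++)
    Slots[I].Next = static_cast<uint16_t>(I + 1);
  Slots[kSize - 1].Next = kInvalid;

  // Popping a free slot is O(1): take the head, advance it.
  uint16_t Free = AvailableHead;
  AvailableHead = Slots[Free].Next;
  assert(Free == 0 && AvailableHead == 1);

  // Returning a slot pushes it back on top of the stack.
  Slots[Free].Next = AvailableHead;
  AvailableHead = Free;
  assert(AvailableHead == 0);
  return 0;
}
```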
@@ -243,17 +265,32 @@ template <typename Config> class MapAllocatorCache {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
    }
+
+    // Usually only one entry will be evicted from the cache.
+    // Only in the rare event that the cache shrinks in real-time
+    // due to a decrease in the configurable value MaxEntriesCount
+    // will more than one cache entry be evicted.
+    // The vector is used to save the MemMaps of evicted entries so
+    // that the unmap call can be performed outside the lock
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
    do {
      ScopedLock L(Mutex);
+
+      // Time must be computed under the lock to ensure
+      // that the LRU cache remains sorted with respect to
+      // time in a multithreaded environment
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
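The EvictionMemMaps vector introduced in this hunk exists so that the unmap of evicted entries never happens while the mutex is held: mappings are only collected inside the critical section and released after it ends. A simplified, self-contained sketch of that pattern, using std::mutex, std::vector and a made-up Mapping type in place of Scudo's Mutex, Vector and MemMapT:

```cpp
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <vector>

struct Mapping { // stand-in for MemMapT
  void unmap() { std::puts("unmap performed outside the lock"); }
};

std::mutex CacheMutex;      // stand-in for Mutex
std::vector<Mapping> Cache; // stand-in for the LRU entries

void storeAndEvict(Mapping Incoming, std::size_t MaxEntries) {
  std::vector<Mapping> Evicted; // usually holds at most one element
  {
    std::lock_guard<std::mutex> Lock(CacheMutex);
    // Evict from the tail until there is room, but only record the
    // mappings here; the expensive unmap happens after the lock is dropped.
    while (Cache.size() >= MaxEntries) {
      Evicted.push_back(Cache.back());
      Cache.pop_back();
    }
    Cache.insert(Cache.begin(), Incoming); // new entry becomes most recent
  }
  for (Mapping &M : Evicted)
    M.unmap();
}

int main() {
  for (int I = 0; I < 4; I++)
    storeAndEvict(Mapping{}, /*MaxEntries=*/2);
  return 0;
}
```

The same reasoning explains why the patch reads getMonotonicTimeFast() under the lock: entries are stamped and linked in one critical section, so the list stays sorted by time.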
@@ -269,30 +306,27 @@ template <typename Config> class MapAllocatorCache {
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (needToEvict()) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
      }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
    } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +346,8 @@ template <typename Config> class MapAllocatorCache {
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
@@ -347,8 +380,7 @@ template <typename Config> class MapAllocatorCache {
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
        SuccessfulRetrieves++;
      }
    }
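With the LRU list in place, retrieve() walks it from LRUHead and keeps the candidate whose committed size exceeds the request by the smallest margin (OptimalFitIndex/MinDiff). The sketch below shows that best-fit selection in isolation, over a plain array and without the alignment, header and fragmentation checks the real code applies; all names are hypothetical:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical flattened view of cached blocks: only size and validity matter.
struct Block {
  uint64_t CommitSize;
  bool Valid;
};

int main() {
  Block Blocks[] = {{1 << 20, true}, {1 << 16, true}, {1 << 18, true}};
  const uint64_t Wanted = 1 << 17; // a 128 KiB request

  uint64_t MinDiff = UINT64_MAX;
  int Best = -1;
  // Keep the candidate that wastes the least space over the request.
  for (int I = 0; I < 3; I++) {
    if (!Blocks[I].Valid || Blocks[I].CommitSize < Wanted)
      continue;
    const uint64_t Diff = Blocks[I].CommitSize - Wanted;
    if (Diff < MinDiff) {
      MinDiff = Diff;
      Best = I;
    }
  }
  std::printf("best fit: index %d, waste %llu bytes\n", Best,
              static_cast<unsigned long long>(MinDiff));
  return 0;
}
```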
@@ -418,11 +450,9 @@ template <typename Config> class MapAllocatorCache {
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
    }
    QuarantinePos = -1U;
  }
@@ -434,6 +464,66 @@ template <typename Config> class MapAllocatorCache {
  void unmapTestOnly() { empty(); }

private:
+  bool needToEvict() REQUIRES(Mutex) {
+    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+  }
+
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // The LRU list should have no valid entries when the cache is empty
+    if (EntriesCount == 0) {
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+    }
+  }
+
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
@@ -443,11 +533,10 @@ template <typename Config> class MapAllocatorCache {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
-        Entries[I].invalidate();
+        remove(I);
        N++;
      }
      EntriesCount = 0;
-      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
@@ -484,14 +573,20 @@ template <typename Config> class MapAllocatorCache {
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // The AvailableHead is the top of the stack of available entries
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
};

template <typename Config> class MapAllocator {