 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"
 
 namespace scudo {
 
@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }
 
 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;
 
   bool isValid() { return CommitBase != 0; }
 
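
The Next and Prev fields added above turn the fixed Entries array into an intrusive doubly-linked list addressed by u16 indices instead of pointers, with CacheIndexMax doubling as the InvalidEntry "null" index. As a rough standalone illustration of that layout (hypothetical names and plain fixed-width types, not part of this patch):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for CachedBlock: the list links live inside the
// element itself, so the list needs no separate node allocations.
struct Block {
  static constexpr uint16_t InvalidEntry = UINT16_MAX; // sentinel "null" index
  uintptr_t CommitBase = 0;
  uint16_t Next = 0;
  uint16_t Prev = 0;
};

int main() {
  Block Entries[4];
  // Hand-build a two-element list: Entries[2] -> Entries[0].
  uint16_t Head = 2;
  Entries[2].CommitBase = 0x1000;
  Entries[2].Next = 0;
  Entries[2].Prev = Block::InvalidEntry;
  Entries[0].CommitBase = 0x2000;
  Entries[0].Next = Block::InvalidEntry;
  Entries[0].Prev = 2;

  // Forward traversal, the same walk the new getStats()/retrieve() loops do.
  for (uint16_t I = Head; I != Block::InvalidEntry; I = Entries[I].Next)
    std::printf("index %u base 0x%zx\n", static_cast<unsigned>(I),
                static_cast<size_t>(Entries[I].CommitBase));
  return 0;
}
```
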
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,33 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);
 
-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -243,17 +265,32 @@ template <typename Config> class MapAllocatorCache {
         Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                          MAP_NOACCESS);
       }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
     }
+
+    // Usually only one entry will be evicted from the cache.
+    // Only in the rare event that the cache shrinks in real-time
+    // due to a decrease in the configurable value MaxEntriesCount
+    // will more than one cache entry be evicted.
+    // The vector is used to save the MemMaps of evicted entries so
+    // that the unmap call can be performed outside the lock
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     do {
       ScopedLock L(Mutex);
+
+      // Time must be computed under the lock to ensure
+      // that the LRU cache remains sorted with respect to
+      // time in a multithreaded environment
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,36 +306,32 @@ template <typename Config> class MapAllocatorCache {
         OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (needToEvict()) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                 LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
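
The reworked store() above follows a collect-then-release pattern: while the mutex is held it only records the MemMaps of evicted entries in EvictionMemMaps, and the actual unmap calls run after the lock is dropped. A generic sketch of that pattern using standard C++ primitives (std::mutex and std::vector standing in for scudo's ScopedLock and Vector; Resource, releaseSlow, and MaxEntries are made-up names):

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

struct Resource {
  void releaseSlow() {} // stands in for MemMap.unmap(): an expensive syscall
};

class Cache {
public:
  void store(Resource R) {
    std::vector<Resource> Evicted; // usually holds at most one element
    {
      std::lock_guard<std::mutex> L(M);
      // Under the lock, only do bookkeeping: remember what must be released.
      while (Entries.size() >= MaxEntries) {
        Evicted.push_back(Entries.back());
        Entries.pop_back();
      }
      Entries.insert(Entries.begin(), R); // most recently used at the front
    }
    // The expensive release runs outside the critical section, so other
    // threads are not stalled on the mutex while the syscalls complete.
    for (Resource &E : Evicted)
      E.releaseSlow();
  }

private:
  std::mutex M;
  std::vector<Resource> Entries;
  static constexpr std::size_t MaxEntries = 32;
};

int main() {
  Cache C;
  C.store(Resource{});
  return 0;
}
```
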
@@ -312,9 +345,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +379,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -417,12 +448,9 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +462,66 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
+  bool needToEvict() REQUIRES(Mutex) {
+    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+  }
+
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // A free slot must be available since EntriesCount < MaxEntriesCount
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // The LRU list must be empty when the cache holds no entries
+    if (EntriesCount == 0) {
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
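
For reference, the insert() and remove() helpers above are standard operations on an array-backed LRU list: occupied slots form a doubly-linked list ordered most recently used first, and free slots form a singly-linked stack threaded through the same Next field. A compilable miniature of the same scheme (MiniCache, push, and pop are illustrative names, not scudo APIs):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Node {
  static constexpr uint16_t Invalid = UINT16_MAX;
  int Value = 0;
  bool Valid = false;
  uint16_t Next = 0;
  uint16_t Prev = 0;
};

template <uint16_t N> class MiniCache {
public:
  MiniCache() {
    // Free slots initially form a stack threaded through Next, slot 0 on top.
    for (uint16_t I = 0; I + 1 < N; I++)
      Slots[I].Next = static_cast<uint16_t>(I + 1);
    Slots[N - 1].Next = Node::Invalid;
  }

  // Insert at the head: the newest entry becomes the most recently used.
  void push(int Value) {
    assert(FreeHead != Node::Invalid && "cache full");
    uint16_t I = FreeHead;
    FreeHead = Slots[I].Next;
    Slots[I].Value = Value;
    Slots[I].Valid = true;
    Slots[I].Next = Head;
    Slots[I].Prev = Node::Invalid;
    if (Head != Node::Invalid)
      Slots[Head].Prev = I;
    else
      Tail = I; // first element is both head and tail
    Head = I;
  }

  // Unlink slot I from the LRU list and return it to the free stack.
  void pop(uint16_t I) {
    assert(Slots[I].Valid);
    Slots[I].Valid = false;
    if (I == Head)
      Head = Slots[I].Next;
    else
      Slots[Slots[I].Prev].Next = Slots[I].Next;
    if (I == Tail)
      Tail = Slots[I].Prev;
    else
      Slots[Slots[I].Next].Prev = Slots[I].Prev;
    Slots[I].Next = FreeHead;
    FreeHead = I;
  }

  uint16_t tail() const { return Tail; }

  void dump() const {
    for (uint16_t I = Head; I != Node::Invalid; I = Slots[I].Next)
      std::printf("%d ", Slots[I].Value);
    std::printf("\n");
  }

private:
  Node Slots[N];
  uint16_t Head = Node::Invalid;
  uint16_t Tail = Node::Invalid;
  uint16_t FreeHead = 0;
};

int main() {
  MiniCache<4> C;
  C.push(1);
  C.push(2);
  C.push(3);       // list is now 3 2 1 (most recent first)
  C.pop(C.tail()); // evict the least recently used entry (value 1)
  C.dump();        // prints "3 2"
  return 0;
}
```
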
@@ -443,11 +531,10 @@ template <typename Config> class MapAllocatorCache {
         if (!Entries[I].isValid())
           continue;
         MapInfo[N] = Entries[I].MemMap;
-        Entries[I].invalidate();
+        remove(I);
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +571,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // The AvailableHead is the top of the stack of available entries
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {