@@ -117,40 +117,30 @@ template <typename Config> class SizeClassAllocator64 {
     SmallerBlockReleasePageDelta =
         PagesInGroup * (1 + MinSizeClass / 16U) / 100;
 
-    // Reserve the space required for the Primary.
-    CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
-                                "scudo:primary_reserve"));
-    PrimaryBase = ReservedMemory.getBase();
-    DCHECK_NE(PrimaryBase, 0U);
-
     u32 Seed;
     const u64 Time = getMonotonicTimeFast();
     if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
-      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
+      Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));
 
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
+    for (uptr I = 0; I < NumClasses; I++)
+      getRegionInfo(I)->RandState = getRandomU32(&Seed);
 
-      // The actual start of a region is offset by a random number of pages
-      // when PrimaryEnableRandomOffset is set.
-      Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
-                          (Config::getEnableRandomOffset()
-                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
-                               : 0);
-      Region->RandState = getRandomU32(&Seed);
-      // Releasing small blocks is expensive, set a higher threshold to avoid
-      // frequent page releases.
-      if (isSmallBlock(getSizeByClassId(I)))
-        Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
-      else
-        Region->TryReleaseThreshold = PageSize;
-      Region->ReleaseInfo.LastReleaseAtNs = Time;
+    if (Config::getEnableContiguousRegions()) {
+      ReservedMemoryT ReservedMemory = {};
+      // Reserve the space required for the Primary.
+      CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
+                                  "scudo:primary_reserve"));
+      const uptr PrimaryBase = ReservedMemory.getBase();
+
+      for (uptr I = 0; I < NumClasses; I++) {
+        MemMapT RegionMemMap = ReservedMemory.dispatch(
+            PrimaryBase + (I << RegionSizeLog), RegionSize);
+        RegionInfo *Region = getRegionInfo(I);
 
-      Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
-          PrimaryBase + (I << RegionSizeLog), RegionSize);
-      CHECK(Region->MemMapInfo.MemMap.isAllocated());
+        initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
+      }
+      shuffle(RegionInfoArray, NumClasses, &Seed);
     }
-    shuffle(RegionInfoArray, NumClasses, &Seed);
 
     // The binding should be done after region shuffling so that it won't bind
     // the FLLock from the wrong region.
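
Note: to make the release-threshold formula at the top of this hunk concrete, a worked example with illustrative values (4 KiB pages and a 256 KiB block group, so PagesInGroup = 64, and MinSizeClass = 32; none of these values are fixed by the diff):

    SmallerBlockReleasePageDelta = 64 * (1 + 32 / 16U) / 100  // integer division
                                 = 64 * 3 / 100
                                 = 1                          // one page of slack

Also note the seeding change: PrimaryBase no longer exists when the RNG is seeded, so the fallback seed now mixes the address of the stack variable Seed into the timestamp instead.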
@@ -160,14 +150,17 @@ template <typename Config> class SizeClassAllocator64 {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
-  void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
+  void unmapTestOnly() {
     for (uptr I = 0; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
+      {
+        ScopedLock ML(Region->MMLock);
+        MemMapT MemMap = Region->MemMapInfo.MemMap;
+        if (MemMap.isAllocated())
+          MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      }
       *Region = {};
     }
-    if (PrimaryBase)
-      ReservedMemory.release();
-    PrimaryBase = 0U;
   }
 
   // When all blocks are freed, it has to be the same size as `AllocatedUser`.
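
Note: dropping NO_THREAD_SAFETY_ANALYSIS is possible because the new body takes Region->MMLock through a ScopedLock, which Clang's thread-safety analysis understands. A minimal sketch of the pattern using Scudo's annotation macros (the surrounding names here are illustrative, not the real RegionInfo layout):

    HybridMutex MMLock;
    PagesInfo MemMapInfo GUARDED_BY(MMLock); // unlocked access is a warning

    void unmapRegion() {
      ScopedLock ML(MMLock);     // RAII acquire; analysis sees MMLock held
      MemMapT MemMap = MemMapInfo.MemMap;
      if (MemMap.isAllocated())
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }                            // RAII release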
@@ -251,9 +244,10 @@ template <typename Config> class SizeClassAllocator64 {
       }
 
       const bool RegionIsExhausted = Region->Exhausted;
-      if (!RegionIsExhausted)
+      if (!RegionIsExhausted) {
         PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
                                                 MaxBlockCount);
+      }
       ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
       break;
     }
@@ -514,7 +508,6 @@ template <typename Config> class SizeClassAllocator64 {
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr PrimarySize = RegionSize * NumClasses;
 
   static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
   // Fill at most this number of batches from the newly map'd memory.
@@ -570,9 +563,14 @@ template <typename Config> class SizeClassAllocator64 {
   }
 
   uptr getRegionBaseByClassId(uptr ClassId) {
-    return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
-                     RegionSize) +
-           PrimaryBase;
+    RegionInfo *Region = getRegionInfo(ClassId);
+    Region->MMLock.assertHeld();
+
+    if (!Config::getEnableContiguousRegions() &&
+        !Region->MemMapInfo.MemMap.isAllocated()) {
+      return 0U;
+    }
+    return Region->MemMapInfo.MemMap.getBase();
   }
 
   static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
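
Note: getRegionBaseByClassId must now return the mapping base itself, because compact pointers are encoded relative to it while RegionBeg may carry a random page offset. A minimal round-trip sketch, assuming the usual (Ptr - Base) >> CompactPtrScale encoding this file uses:

    // Sketch only; CompactPtrT and CompactPtrScale as defined in this file.
    CompactPtrT compact(uptr Base, uptr Ptr) {
      return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
    }
    uptr decompact(uptr Base, CompactPtrT C) {
      return Base + (static_cast<uptr>(C) << CompactPtrScale);
    }
    // decompact(Base, compact(Base, P)) == P for any block P in the region,
    // provided both sides agree on Base. With discontiguous regions the base
    // only exists once the region is mapped, hence MMLock.assertHeld() and
    // the 0U sentinel for a not-yet-reserved region.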
@@ -602,6 +600,30 @@ template <typename Config> class SizeClassAllocator64 {
     return BlockSize > PageSize;
   }
 
+  ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
+                                MemMapT MemMap, bool EnableRandomOffset)
+      REQUIRES(Region->MMLock) {
+    DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
+    DCHECK(MemMap.isAllocated());
+
+    const uptr PageSize = getPageSizeCached();
+
+    Region->MemMapInfo.MemMap = MemMap;
+
+    Region->RegionBeg = MemMap.getBase();
+    if (EnableRandomOffset) {
+      Region->RegionBeg +=
+          (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
+    }
+
+    // Releasing small blocks is expensive, set a higher threshold to avoid
+    // frequent page releases.
+    if (isSmallBlock(getSizeByClassId(ClassId)))
+      Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+    else
+      Region->TryReleaseThreshold = PageSize;
+  }
+
   void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
       REQUIRES(Region->FLLock) {
     DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
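
Note: initRegion is now the single place where a region's mapping, RegionBeg offset, and release threshold are established. Its two call sites in this change, condensed here for orientation (taken from the hunks above and below, not new code):

    // Constructor, contiguous mode: regions are carved out of one reservation,
    // and the page offset is randomized when the config asks for it.
    initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());

    // populateFreeListAndPopBlocks(), discontiguous mode: each region gets its
    // own standalone reservation, whose base is already unpredictable on
    // typical targets, so no extra random offset is applied.
    initRegion(Region, ClassId, /*MemMap=*/..., /*EnableRandomOffset=*/false);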
@@ -989,9 +1011,26 @@ template <typename Config> class SizeClassAllocator64 {
                                            CompactPtrT *ToArray,
                                            const u16 MaxBlockCount)
       REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+    if (!Config::getEnableContiguousRegions() &&
+        !Region->MemMapInfo.MemMap.isAllocated()) {
+      ReservedMemoryT ReservedMemory;
+      if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
+                                          "scudo:primary_reserve",
+                                          MAP_ALLOWNOMEM))) {
+        Printf("Can't reserve pages for size class %zu.\n",
+               getSizeByClassId(ClassId));
+        Region->Exhausted = true;
+        return 0U;
+      }
+      initRegion(Region, ClassId,
+                 ReservedMemory.dispatch(ReservedMemory.getBase(),
+                                         ReservedMemory.getCapacity()),
+                 /*EnableRandomOffset=*/false);
+    }
+
+    DCHECK(Region->MemMapInfo.MemMap.isAllocated());
     const uptr Size = getSizeByClassId(ClassId);
     const u16 MaxCount = CacheT::getMaxCached(Size);
-
     const uptr RegionBeg = Region->RegionBeg;
     const uptr MappedUser = Region->MemMapInfo.MappedUser;
     const uptr TotalUserBytes =
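
Note: a hypothetical config fragment showing how the lazy path above is selected. The flag name matches this change, but the struct shape and default shown here are illustrative; the real wiring lives in Scudo's allocator config headers:

    struct ExamplePrimaryConfig {
      // ... RegionSizeLog, size class map, etc. ...
      static const bool EnableRandomOffset = true;
      // false: skip the up-front NumClasses * RegionSize reservation and let
      // populateFreeListAndPopBlocks() reserve each region on first use, with
      // MAP_ALLOWNOMEM turning a reservation failure into Region->Exhausted
      // rather than a fatal error.
      static const bool EnableContiguousRegions = false;
    };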
@@ -1683,10 +1722,6 @@ template <typename Config> class SizeClassAllocator64 {
     Region->FLLockCV.notifyAll(Region->FLLock);
   }
 
-  // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
-  // deprecated.
-  uptr PrimaryBase = 0;
-  ReservedMemoryT ReservedMemory = {};
   // The minimum size of pushed blocks that we will try to release the pages in
   // that size class.
   uptr SmallerBlockReleasePageDelta = 0;
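
Note: the net address-space effect of the change, schematically:

    EnableContiguousRegions == true (the previous behavior):
      | Region 0 | Region 1 | ... | Region N-1 |
      one RegionSize * NumClasses reservation made in the constructor and
      dispatched per class (which slot serves which class is then shuffled)

    EnableContiguousRegions == false (new):
      | Region a |  ...gap...  | Region b |  ...gap...
      one standalone RegionSize reservation per class, created lazily on the
      first populateFreeListAndPopBlocks() call for that class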