Commit bab0507

[scudo] Add EnableContiguousRegions mode (#85149)
This lifts the requirement to reserve the memory for all regions up front, which demands a huge amount of contiguous pages and may be a challenge in certain cases. A new flag, EnableContiguousRegions, indicates whether all the regions should be allocated adjacent to each other. Note that when EnableContiguousRegions is disabled, EnableRandomOffset becomes irrelevant because the base of each region is already random.
Parent: 528943f · Commit: bab0507
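For context, the flag travels through scudo's compile-time config: an allocator config struct declares it as a static constant and the primary reads it through a getter, as primary64.h does with Config::getEnableContiguousRegions(). Below is a minimal, self-contained sketch of that pattern; MyConfig and PrimaryLike are hypothetical names, and only the two flag names come from this commit.

    #include <cstdio>

    // Hypothetical config struct; only the two flag names come from this commit.
    struct MyConfig {
      static const bool EnableContiguousRegions = false;
      // Moot when regions are not contiguous: each region base is already random.
      static const bool EnableRandomOffset = true;
    };

    // Hypothetical stand-in for the allocator, which reads the flag the way
    // primary64.h calls Config::getEnableContiguousRegions().
    template <typename Config> struct PrimaryLike {
      static constexpr bool getEnableContiguousRegions() {
        return Config::EnableContiguousRegions;
      }
    };

    int main() {
      std::printf("contiguous regions: %d\n",
                  PrimaryLike<MyConfig>::getEnableContiguousRegions());
    }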

3 files changed: +83, -42 lines

compiler-rt/lib/scudo/standalone/allocator_config.def

Lines changed: 6 additions & 1 deletion
@@ -87,9 +87,14 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
 // PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
 //
 // Indicates support for offsetting the start of a region by a random number of
-// pages. Only used with primary64.
+// pages. This is only used if `EnableContiguousRegions` is enabled.
 PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
 
+// When `EnableContiguousRegions` is true, all regions will be arranged
+// adjacently. This reduces the fragmentation caused by region allocations
+// but may require a huge amount of contiguous pages at initialization.
+PRIMARY_OPTIONAL(const bool, EnableContiguousRegions, true)
+
 // PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
 //
 // Use condition variable to shorten the waiting time of refillment of
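The .def file is an X-macro list: each consumer defines PRIMARY_OPTIONAL (and friends) before including the file, and every entry expands in place. A minimal sketch of that technique, assuming a simplified two-entry list (the real file has many more entries and macro kinds):

    #include <cstdio>

    // Simplified stand-in for allocator_config.def (two entries only).
    #define PRIMARY_OPTIONS(X)                                                    \
      X(const bool, EnableRandomOffset, false)                                    \
      X(const bool, EnableContiguousRegions, true)

    // Expand each entry into a static getter that returns the default.
    #define DEFINE_GETTER(TYPE, NAME, DEFAULT)                                    \
      static constexpr TYPE get##NAME() { return DEFAULT; }

    struct DefaultConfig {
      PRIMARY_OPTIONS(DEFINE_GETTER)
    };

    int main() {
      std::printf("EnableContiguousRegions default: %d\n",
                  DefaultConfig::getEnableContiguousRegions());
    }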

compiler-rt/lib/scudo/standalone/primary64.h

Lines changed: 76 additions & 41 deletions
@@ -117,40 +117,30 @@ template <typename Config> class SizeClassAllocator64 {
     SmallerBlockReleasePageDelta =
         PagesInGroup * (1 + MinSizeClass / 16U) / 100;
 
-    // Reserve the space required for the Primary.
-    CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
-                                "scudo:primary_reserve"));
-    PrimaryBase = ReservedMemory.getBase();
-    DCHECK_NE(PrimaryBase, 0U);
-
     u32 Seed;
     const u64 Time = getMonotonicTimeFast();
     if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
-      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
+      Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));
 
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
+    for (uptr I = 0; I < NumClasses; I++)
+      getRegionInfo(I)->RandState = getRandomU32(&Seed);
 
-      // The actual start of a region is offset by a random number of pages
-      // when PrimaryEnableRandomOffset is set.
-      Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
-                          (Config::getEnableRandomOffset()
-                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
-                               : 0);
-      Region->RandState = getRandomU32(&Seed);
-      // Releasing small blocks is expensive, set a higher threshold to avoid
-      // frequent page releases.
-      if (isSmallBlock(getSizeByClassId(I)))
-        Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
-      else
-        Region->TryReleaseThreshold = PageSize;
-      Region->ReleaseInfo.LastReleaseAtNs = Time;
+    if (Config::getEnableContiguousRegions()) {
+      ReservedMemoryT ReservedMemory = {};
+      // Reserve the space required for the Primary.
+      CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
+                                  "scudo:primary_reserve"));
+      const uptr PrimaryBase = ReservedMemory.getBase();
+
+      for (uptr I = 0; I < NumClasses; I++) {
+        MemMapT RegionMemMap = ReservedMemory.dispatch(
+            PrimaryBase + (I << RegionSizeLog), RegionSize);
+        RegionInfo *Region = getRegionInfo(I);
 
-      Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
-          PrimaryBase + (I << RegionSizeLog), RegionSize);
-      CHECK(Region->MemMapInfo.MemMap.isAllocated());
+        initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
+      }
+      shuffle(RegionInfoArray, NumClasses, &Seed);
     }
-    shuffle(RegionInfoArray, NumClasses, &Seed);
 
     // The binding should be done after region shuffling so that it won't bind
     // the FLLock from the wrong region.
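To make the contiguous layout concrete: region I begins at PrimaryBase + (I << RegionSizeLog), and with EnableRandomOffset its usable start is nudged forward by 1 to 16 random pages. A standalone sketch of that arithmetic follows; all constants are assumptions for the demo, and std::mt19937_64 stands in for scudo's getRandomModN.

    #include <cstdint>
    #include <cstdio>
    #include <random>

    int main() {
      const uint64_t RegionSizeLog = 28;            // assumed: 256 MiB regions
      const uint64_t NumClasses = 4;                // assumed class count for demo
      const uint64_t PageSize = 4096;               // assumed page size
      const uint64_t PrimaryBase = 0x400000000ULL;  // stand-in for getBase()

      std::mt19937_64 Rng(1);
      for (uint64_t I = 0; I < NumClasses; I++) {
        const uint64_t Base = PrimaryBase + (I << RegionSizeLog);
        // Mirrors (getRandomModN(&Seed, 16) + 1) * PageSize: 1..16 pages.
        const uint64_t RegionBeg = Base + (Rng() % 16 + 1) * PageSize;
        std::printf("class %llu: region base 0x%llx, RegionBeg 0x%llx\n",
                    (unsigned long long)I, (unsigned long long)Base,
                    (unsigned long long)RegionBeg);
      }
    }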
@@ -160,14 +150,17 @@ template <typename Config> class SizeClassAllocator64 {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
-  void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
+  void unmapTestOnly() {
     for (uptr I = 0; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
+      {
+        ScopedLock ML(Region->MMLock);
+        MemMapT MemMap = Region->MemMapInfo.MemMap;
+        if (MemMap.isAllocated())
+          MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      }
       *Region = {};
     }
-    if (PrimaryBase)
-      ReservedMemory.release();
-    PrimaryBase = 0U;
   }
 
   // When all blocks are freed, it has to be the same size as `AllocatedUser`.
@@ -251,9 +244,10 @@ template <typename Config> class SizeClassAllocator64 {
       }
 
       const bool RegionIsExhausted = Region->Exhausted;
-      if (!RegionIsExhausted)
+      if (!RegionIsExhausted) {
         PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
                                                 MaxBlockCount);
+      }
       ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
       break;
     }
@@ -514,7 +508,6 @@ template <typename Config> class SizeClassAllocator64 {
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr PrimarySize = RegionSize * NumClasses;
 
   static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
   // Fill at most this number of batches from the newly map'd memory.
@@ -570,9 +563,14 @@ template <typename Config> class SizeClassAllocator64 {
   }
 
   uptr getRegionBaseByClassId(uptr ClassId) {
-    return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
-                     RegionSize) +
-           PrimaryBase;
+    RegionInfo *Region = getRegionInfo(ClassId);
+    Region->MMLock.assertHeld();
+
+    if (!Config::getEnableContiguousRegions() &&
+        !Region->MemMapInfo.MemMap.isAllocated()) {
+      return 0U;
+    }
+    return Region->MemMapInfo.MemMap.getBase();
   }
 
   static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
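getRegionBaseByClassId no longer reconstructs the base via roundDown against a global PrimaryBase; each region's MemMap now records its own base. A quick standalone check (with assumed constants) that the two computations agree whenever RegionBeg stays within its region, which the 1-to-16-page offset guarantees:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Boundary must be a power of two, as RegionSize is (1UL << RegionSizeLog).
    static uint64_t roundDown(uint64_t X, uint64_t Boundary) {
      return X & ~(Boundary - 1);
    }

    int main() {
      const uint64_t RegionSize = 1ULL << 28;       // assumption for the demo
      const uint64_t PrimaryBase = 0x400000000ULL;  // assumption for the demo
      for (uint64_t I = 0; I < 4; I++) {
        const uint64_t Base = PrimaryBase + I * RegionSize;
        const uint64_t RegionBeg = Base + 5 * 4096; // some in-region offset
        // Old computation (contiguous layout) equals the stored base.
        assert(roundDown(RegionBeg - PrimaryBase, RegionSize) + PrimaryBase == Base);
      }
      std::printf("old and new region-base computations agree\n");
    }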
@@ -602,6 +600,30 @@ template <typename Config> class SizeClassAllocator64 {
     return BlockSize > PageSize;
   }
 
+  ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
+                                MemMapT MemMap, bool EnableRandomOffset)
+      REQUIRES(Region->MMLock) {
+    DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
+    DCHECK(MemMap.isAllocated());
+
+    const uptr PageSize = getPageSizeCached();
+
+    Region->MemMapInfo.MemMap = MemMap;
+
+    Region->RegionBeg = MemMap.getBase();
+    if (EnableRandomOffset) {
+      Region->RegionBeg +=
+          (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
+    }
+
+    // Releasing small blocks is expensive, set a higher threshold to avoid
+    // frequent page releases.
+    if (isSmallBlock(getSizeByClassId(ClassId)))
+      Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+    else
+      Region->TryReleaseThreshold = PageSize;
+  }
+
   void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
       REQUIRES(Region->FLLock) {
     DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
@@ -989,9 +1011,26 @@ template <typename Config> class SizeClassAllocator64 {
                                          CompactPtrT *ToArray,
                                          const u16 MaxBlockCount)
       REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+    if (!Config::getEnableContiguousRegions() &&
+        !Region->MemMapInfo.MemMap.isAllocated()) {
+      ReservedMemoryT ReservedMemory;
+      if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
+                                          "scudo:primary_reserve",
+                                          MAP_ALLOWNOMEM))) {
+        Printf("Can't reserve pages for size class %zu.\n",
+               getSizeByClassId(ClassId));
+        Region->Exhausted = true;
+        return 0U;
+      }
+      initRegion(Region, ClassId,
+                 ReservedMemory.dispatch(ReservedMemory.getBase(),
+                                         ReservedMemory.getCapacity()),
+                 /*EnableRandomOffset=*/false);
+    }
+
+    DCHECK(Region->MemMapInfo.MemMap.isAllocated());
     const uptr Size = getSizeByClassId(ClassId);
     const u16 MaxCount = CacheT::getMaxCached(Size);
-
     const uptr RegionBeg = Region->RegionBeg;
     const uptr MappedUser = Region->MemMapInfo.MappedUser;
     const uptr TotalUserBytes =
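The block added at the top of populateFreeListAndPopBlocks is classic lazy initialization: in the non-contiguous mode, a region's address space is reserved only the first time its size class has to hand out blocks, and a failed reservation marks the class exhausted instead of aborting. A rough standalone sketch of that control flow, using mmap as a stand-in for scudo's ReservedMemoryT (all names here are illustrative, and MAP_ANONYMOUS assumes a Linux-like system):

    #include <cstdio>
    #include <sys/mman.h>

    struct Region {
      void *Base = nullptr;
      bool Exhausted = false;
    };

    // Reserve one region's address space on first use, mirroring the
    // !EnableContiguousRegions branch: fail softly and flag exhaustion.
    static bool ensureRegionReserved(Region &R, size_t RegionSize) {
      if (R.Base != nullptr)
        return true; // already reserved by an earlier call
      void *P = mmap(nullptr, RegionSize, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (P == MAP_FAILED) {
        std::printf("Can't reserve pages for this region.\n");
        R.Exhausted = true;
        return false;
      }
      R.Base = P;
      return true;
    }

    int main() {
      Region R;
      if (ensureRegionReserved(R, size_t{1} << 28))
        std::printf("region reserved at %p\n", R.Base);
    }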
@@ -1683,10 +1722,6 @@ template <typename Config> class SizeClassAllocator64 {
     Region->FLLockCV.notifyAll(Region->FLLock);
   }
 
-  // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
-  // deprecated.
-  uptr PrimaryBase = 0;
-  ReservedMemoryT ReservedMemory = {};
   // The minimum size of pushed blocks that we will try to release the pages in
   // that size class.
   uptr SmallerBlockReleasePageDelta = 0;

compiler-rt/lib/scudo/standalone/tests/primary_test.cpp

Lines changed: 1 addition & 0 deletions
@@ -90,6 +90,7 @@ template <typename SizeClassMapT> struct TestConfig3 {
   static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
   typedef scudo::uptr CompactPtrT;
   static const scudo::uptr CompactPtrScale = 0;
+  static const bool EnableContiguousRegions = false;
   static const bool EnableRandomOffset = true;
   static const scudo::uptr MapSizeIncrement = 1UL << 18;
 };
