[scudo] Add EnableMultiRegions mode #98076
Conversation

ChiaHungDuan commented on Jul 8, 2024
@llvm/pr-subscribers-compiler-rt-sanitizer

Author: None (ChiaHungDuan)

Changes
Patch is 51.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/98076.diff

11 Files Affected:
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index ce37b1cfaedcc..a14541f519f7c 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -84,6 +84,20 @@ PRIMARY_OPTIONAL(const uptr, CompactPtrScale, SCUDO_MIN_ALIGNMENT_LOG)
// Indicates support for offsetting the start of a region by a random number of
// pages. This is only used if `EnableContiguousRegions` is enabled.
PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
+
+// This allows each size class to have multiple regions instead of one. Note
+// that this is an experimental option so it has a few constraints while using.
+// a. Pointer compaction is diabled. Which means `CompactPtrT` needs to be the
+// pointer integral type, i.e., uptr.
+// b. `EnableRandomOffset` needs to be false. Pointer grouping requires
+// the beginning of allocation address of a region to be aligned with
+// `GroupSizeLog`. Without pointer compaction, it relies the region to be
+// allocated with proper alignment and the random offset will break the
+// assumption.
+// c. Condition variable is not supported under this mode. This is still under
+// developing.
+PRIMARY_OPTIONAL(const bool, EnableMultiRegions, false)
+
PRIMARY_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)
// When `EnableContiguousRegions` is true, all regions will be be arranged in
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_base.h b/compiler-rt/lib/scudo/standalone/mem_map_base.h
index 99ab0cba604fc..f4261f035d778 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_base.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -93,9 +93,11 @@ template <class Derived, typename MemMapTy> class ReservedMemory {
constexpr ReservedMemory() = default;
// Reserve a chunk of memory at a suggested address.
- bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0,
+ uptr Alignment = getPageSizeCached()) {
DCHECK(!isCreated());
- return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
+ DCHECK_EQ(Alignment % getPageSizeCached(), 0U);
+ return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags, Alignment);
}
// Release the entire reserved memory.
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
index fc793abf44cda..dca6c717519e3 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -227,7 +227,9 @@ void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
}
bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
- UNUSED const char *Name, uptr Flags) {
+ UNUSED const char *Name, uptr Flags,
+ UNUSED uptr Alignment) {
+ // TODO: Add the support of alignment.
const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
// Reserve memory by mapping the placeholder VMO without any permission.
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
index 2e66f89cfca55..3adab733645c4 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
@@ -57,7 +57,8 @@ class ReservedMemoryFuchsia final
public:
constexpr ReservedMemoryFuchsia() = default;
- bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags,
+ uptr Alignment);
void releaseImpl();
MemMapT dispatchImpl(uptr Addr, uptr Size);
uptr getBaseImpl() { return Base; }
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
index 783c4f0d9ab0f..943b528e379d0 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
@@ -127,11 +127,26 @@ void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
}
bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
- uptr Flags) {
+ uptr Flags, uptr Alignment) {
ReservedMemoryLinux::MemMapT MemMap;
- if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
+ uptr MapSize = Size;
+ if (Alignment != getPageSizeCached())
+ MapSize += Alignment;
+ if (!MemMap.map(Addr, MapSize, Name, Flags | MAP_NOACCESS))
return false;
+ if (Alignment != getPageSizeCached()) {
+ uptr Offset = MemMap.getBase() % Alignment;
+ if (Offset != 0) {
+ Offset = Alignment - Offset;
+ MemMap.unmap(MemMap.getBase(), Offset);
+ }
+ MemMap.unmap(MemMap.getBase() + Size, MemMap.getCapacity() - Size);
+ }
+
+ DCHECK_EQ(MemMap.getBase() % Alignment, 0);
+ DCHECK_EQ(MemMap.getCapacity(), Size);
+
MapBase = MemMap.getBase();
MapCapacity = MemMap.getCapacity();
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_linux.h b/compiler-rt/lib/scudo/standalone/mem_map_linux.h
index 7a89b3bff5ed1..9f61d8d1f47ef 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_linux.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map_linux.h
@@ -51,7 +51,8 @@ class ReservedMemoryLinux final
uptr getCapacityImpl() { return MapCapacity; }
// These threes are specific to `ReservedMemory`.
- bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags,
+ uptr Alignment);
void releaseImpl();
MemMapT dispatchImpl(uptr Addr, uptr Size);
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index ebfb8dfe0a31f..57b762d1baf77 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -365,7 +365,7 @@ template <typename Config> class SizeClassAllocator32 {
}
const char *getRegionInfoArrayAddress() const { return nullptr; }
- static uptr getRegionInfoArraySize() { return 0; }
+ uptr getRegionInfoArraySize() { return 0; }
static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
UNUSED uptr Ptr) {
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 8a583bacb4a93..37b3ec2ddddb3 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -56,6 +56,17 @@ template <typename Config> class SizeClassAllocator64 {
static_assert(RegionSizeLog >= GroupSizeLog,
"Group size shouldn't be greater than the region size");
static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
+ // Local cache stores the pointers in the type of compacted pointer and the
+ // compaction is done by calculating the offset to the base address of a
+ // region. Currently, we don't support decompacting through multiple regions
+ // because of the concern of performance and so we disable the pointer
+ // compaction.
+ // TODO(chiahungduan): Allow local cache store the raw pointer and keep
+ // storing the compacted pointers in each region to save memory.
+ static const bool DisablePtrCompaction = Config::getEnableMultiRegions();
+ static_assert(!DisablePtrCompaction || sizeof(CompactPtrT) == sizeof(uptr),
+ "Pointer compaction is disabled, `CompactPtrT` needs to be the "
+ "same size of `uptr`");
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef TransferBatch<ThisT> TransferBatchT;
@@ -117,35 +128,45 @@ template <typename Config> class SizeClassAllocator64 {
SmallerBlockReleasePageDelta =
PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+ RegionInfoManager.init(RegionInfoAllocator);
+
u32 Seed;
const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));
for (uptr I = 0; I < NumClasses; I++)
- getRegionInfo(I)->RandState = getRandomU32(&Seed);
+ RegionInfoManager.getCurRegionInfo(I)->RandState = getRandomU32(&Seed);
if (Config::getEnableContiguousRegions()) {
ReservedMemoryT ReservedMemory = {};
+ // Block grouping requires the base address of a Region to be aligned
+ // with GrouopSize and pointer is compacted according to the offset to the
+ // base of a region so it always meets the requirement. As a result when
+ // the compaction is disabled, it relies the base address to be aligned.
+ const uptr Alignment =
+ DisablePtrCompaction ? (1UL << GroupSizeLog) : PageSize;
// Reserve the space required for the Primary.
CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
- "scudo:primary_reserve"));
+ "scudo:primary_reserve", /*Flag=*/0,
+ Alignment));
const uptr PrimaryBase = ReservedMemory.getBase();
for (uptr I = 0; I < NumClasses; I++) {
MemMapT RegionMemMap = ReservedMemory.dispatch(
PrimaryBase + (I << RegionSizeLog), RegionSize);
- RegionInfo *Region = getRegionInfo(I);
+ RegionInfo *Region = RegionInfoManager.getCurRegionInfo(I);
initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
}
- shuffle(RegionInfoArray, NumClasses, &Seed);
+ RegionInfoManager.shuffle(&Seed);
}
// The binding should be done after region shuffling so that it won't bind
// the FLLock from the wrong region.
for (uptr I = 0; I < NumClasses; I++)
- getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
+ RegionInfoManager.getCurRegionInfo(I)->FLLockCV.bindTestOnly(
+ RegionInfoManager.getCurRegionInfo(I)->FLLock);
// The default value in the primary config has the higher priority.
if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
@@ -155,82 +176,111 @@ template <typename Config> class SizeClassAllocator64 {
void unmapTestOnly() {
for (uptr I = 0; I < NumClasses; I++) {
- RegionInfo *Region = getRegionInfo(I);
- {
- ScopedLock ML(Region->MMLock);
- MemMapT MemMap = Region->MemMapInfo.MemMap;
+ auto RegionInfoIter = RegionInfoManager.getRegionInfoIter(I);
+ do {
+ ScopedLock ML(RegionInfoIter->MMLock);
+ MemMapT MemMap = RegionInfoIter->MemMapInfo.MemMap;
if (MemMap.isAllocated())
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
- }
- *Region = {};
+ RegionInfo *OldRegion = RegionInfoIter.get();
+ ++RegionInfoIter;
+ *OldRegion = {};
+ } while (!RegionInfoIter.end());
}
}
// When all blocks are freed, it has to be the same size as `AllocatedUser`.
void verifyAllBlocksAreReleasedTestOnly() {
+ uptr NumRegionInfo = 0;
+ // TODO: Verify all pointers are belong to the right region
// `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
uptr BatchClassUsedInFreeLists = 0;
for (uptr I = 0; I < NumClasses; I++) {
// We have to count BatchClassUsedInFreeLists in other regions first.
if (I == SizeClassMap::BatchClassId)
continue;
- RegionInfo *Region = getRegionInfo(I);
- ScopedLock ML(Region->MMLock);
- ScopedLock FL(Region->FLLock);
- const uptr BlockSize = getSizeByClassId(I);
- uptr TotalBlocks = 0;
- for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
- // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
- BatchClassUsedInFreeLists += BG.Batches.size() + 1;
- for (const auto &It : BG.Batches)
- TotalBlocks += It.getCount();
- }
+ auto RegionInfoIter = RegionInfoManager.getRegionInfoIter(I);
+
+ do {
+ ++NumRegionInfo;
+
+ ScopedLock ML(RegionInfoIter->MMLock);
+ ScopedLock FL(RegionInfoIter->FLLock);
+ const uptr BlockSize = getSizeByClassId(I);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : RegionInfoIter->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
- DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
- DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
- Region->FreeListInfo.PoppedBlocks);
+ DCHECK_EQ(TotalBlocks,
+ RegionInfoIter->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_EQ(RegionInfoIter->FreeListInfo.PushedBlocks,
+ RegionInfoIter->FreeListInfo.PoppedBlocks);
+
+ ++RegionInfoIter;
+ } while (!RegionInfoIter.end());
}
- RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
- ScopedLock ML(Region->MMLock);
- ScopedLock FL(Region->FLLock);
- const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
- uptr TotalBlocks = 0;
- for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
- if (LIKELY(!BG.Batches.empty())) {
- for (const auto &It : BG.Batches)
- TotalBlocks += It.getCount();
- } else {
- // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
- // itself.
- ++TotalBlocks;
+ auto RegionInfoIter =
+ RegionInfoManager.getRegionInfoIter(SizeClassMap::BatchClassId);
+
+ do {
+ ++NumRegionInfo;
+
+ ScopedLock ML(RegionInfoIter->MMLock);
+ ScopedLock FL(RegionInfoIter->FLLock);
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : RegionInfoIter->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // `BatchGroup` with empty freelist doesn't have `TransferBatch`
+ // record itself.
+ ++TotalBlocks;
+ }
}
- }
- DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
- Region->MemMapInfo.AllocatedUser / BlockSize);
- DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
- Region->FreeListInfo.PushedBlocks);
- const uptr BlocksInUse =
- Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
- DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ RegionInfoIter->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_GE(RegionInfoIter->FreeListInfo.PoppedBlocks,
+ RegionInfoIter->FreeListInfo.PushedBlocks);
+ const uptr BlocksInUse = RegionInfoIter->FreeListInfo.PoppedBlocks -
+ RegionInfoIter->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ ++RegionInfoIter;
+ } while (!RegionInfoIter.end());
+
+ RegionInfoAllocator.verifyTheNumberOfAllocatedRegionInfo(NumRegionInfo);
}
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
const u16 MaxBlockCount) {
DCHECK_LT(ClassId, NumClasses);
- RegionInfo *Region = getRegionInfo(ClassId);
+ auto RegionInfoIter = RegionInfoManager.getRegionInfoIter(ClassId);
u16 PopCount = 0;
- {
- ScopedLock L(Region->FLLock);
- PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
- if (PopCount != 0U)
- return PopCount;
- }
+ do {
+ {
+ ScopedLock FL(RegionInfoIter->FLLock);
+ PopCount = popBlocksImpl(C, ClassId, RegionInfoIter.get(), ToArray,
+ MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
+ }
+
+ ++RegionInfoIter;
+ } while (!RegionInfoIter.end());
bool ReportRegionExhausted = false;
- if (conditionVariableEnabled()) {
+ RegionInfo *Region = RegionInfoManager.getCurRegionInfo(ClassId);
+
+ // TODO(chiahungduan): Support multiple-regions with condition variable.
+ if (conditionVariableEnabled() && !Config::getEnableMultiRegions()) {
PopCount = popBlocksWithCV(C, ClassId, Region, ToArray, MaxBlockCount,
ReportRegionExhausted);
} else {
@@ -247,14 +297,37 @@ template <typename Config> class SizeClassAllocator64 {
}
const bool RegionIsExhausted = Region->Exhausted;
- if (!RegionIsExhausted) {
+ if (!Config::getEnableMultiRegions()) {
+ if (!RegionIsExhausted) {
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
+ }
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+ break;
+ } else {
+ // When a region is exhaused, a new region will be created unless it's
+ // OOM in RegionInfoAllocator. If so, there's no way to create a new
+ // region.
+ if (RegionIsExhausted)
+ break;
PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
MaxBlockCount);
+ if (PopCount != 0)
+ break;
+
+ DCHECK(Region->Exhausted);
+ RegionInfo *NewRegion = populateNewRegion(Region, ClassId);
+ if (NewRegion == nullptr) {
+ ReportRegionExhausted = true;
+ break;
+ }
+
+ // Try to allocate from the new region in the next iteration so that
+ // we can release the `MMLock` of previous region first.
+ Region = NewRegion;
}
- ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
- break;
}
- }
+ } // if (conditionVariableEnabled() && !Config::getEnableMultiRegions())
if (UNLIKELY(ReportRegionExhausted)) {
Printf("Can't populate more pages for size class %zu.\n",
@@ -274,8 +347,24 @@ template <typename Config> class SizeClassAllocator64 {
DCHECK_LT(ClassId, NumClasses);
DCHECK_GT(Size, 0);
- RegionInfo *Region = getRegionInfo(ClassId);
- if (ClassId == SizeClassMap::BatchClassId) {
+ auto IsPtrInRegion = [](RegionInfo *Region,
+ uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
+ // Thread-safety annotation doesn't support lambda. Use a runtime check
+ // instead.
+ Region->MMLock.assertHeld();
+ const uptr RegionEnd = Region->MemMapInfo.MemMap.getBase() +
+ Region->MemMapInfo.MemMap.getCapacity();
+ return Ptr >= Region->RegionBeg && Ptr < RegionEnd;
+ };
+
+ // When multiple-regions is enabled, we need to sort the array to dispatch
+ // the blocks to different regions efficiently. Thus even we don't put
+ // BatchClass into groups, sorting is still necessary and it'll be handled
+ // later in the function.
+ // TODO: Reorder the use of variable
+ RegionInfo *Region = RegionInfoManager.getCurRegionInfo(ClassId);
+ if (ClassId == SizeClassMap::BatchClassId &&
+ !Config::getEnableMultiRegions()) {
ScopedLock L(Region->FLLock);
pushBatchClassBlocks(Region, Array, Size);
if (conditionVariableEnabled())
@@ -287,7 +376,7 @@ template <typename Config> class SizeClassAllocator64 {
// greater than the block size with a certain scale.
bool SameGroup = true;
- if (GroupSizeLog < RegionSizeLog) {
+ if (GroupSizeLog < RegionSizeLog || Config::getEnableMultiRegions()) {
// Sort the blocks so that blocks belonging to the same group can be
// pushed together.
for (u32 I = 1; I < Size; ++I) {
@@ -303,11 +392,41 @@ template <typename Config> class SizeClassAllocator64 {
}
}
- {
+ if (!Config::getEnableMultiRegions()) {
ScopedLock L(Region->FLLock);
...
[truncated]
@cferris1000, there are two commits in this pull request. I'll split them up later; for now they're kept together to make the dependency clear.
Instead of having a single region per size class, this mode increases the number of regions when one is exhausted. This reduces fragmentation when a region is exhausted and gives finer granularity in choosing the size classes. However, it inevitably introduces some performance overhead from managing several regions for a single size class. It's currently an experimental option, and some performance turbulence is expected. See more details and constraints in allocator_config.def.
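For reference, a Primary config honoring these constraints might look like the sketch below. This is purely illustrative and not part of the patch: the option names (`EnableMultiRegions`, `EnableRandomOffset`, `CompactPtrT`, `CompactPtrScale`) come from allocator_config.def, while the remaining members and values simply mirror the existing 64-bit default config.

```cpp
// Hypothetical Primary config enabling the experimental multi-regions mode.
struct Primary {
  using SizeClassMap = DefaultSizeClassMap;
  static const uptr RegionSizeLog = 28U;
  static const uptr GroupSizeLog = 20U;
  typedef uptr CompactPtrT;                     // (a) compaction disabled, so uptr
  static const uptr CompactPtrScale = 0;        //     and no compaction scaling
  static const bool EnableRandomOffset = false; // (b) random offset must stay off
  static const bool EnableMultiRegions = true;  // the new experimental option
  static const uptr MapSizeIncrement = 1UL << 18;
  static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
  static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  // (c) no ConditionVariableT member: condition variables aren't supported
  //     with this mode yet.
};
```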
Force-pushed from 9549949 to 70bfda9
// This allows each size class to have multiple regions instead of one. Note
// that this is an experimental option so it has a few constraints while using.
// a. Pointer compaction is diabled. Which means `CompactPtrT` needs to be the
diabled -> disabled
to be the -> to be a
Done.
// pointer integral type, i.e., uptr.
// b. `EnableRandomOffset` needs to be false. Pointer grouping requires
// the beginning of allocation address of a region to be aligned with
// `GroupSizeLog`. Without pointer compaction, it relies the region to be
relies -> requires
Done.
// b. `EnableRandomOffset` needs to be false. Pointer grouping requires
// the beginning of allocation address of a region to be aligned with
// `GroupSizeLog`. Without pointer compaction, it relies the region to be
// allocated with proper alignment and the random offset will break the
and the random offset -> and a random offset
Done.
// allocated with proper alignment and the random offset will break the
// assumption.
// c. Condition variable is not supported under this mode. This is still under
// developing.
developing -> development
Done.
return false;

if (Alignment != getPageSizeCached()) { |
Is it worth adding a comment indicating that this is not the normal path?
Added `LIKELY`, let me know if you think a comment is better.
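For context, the annotated path would read roughly as below. This is a sketch pieced together from the quoted fragments of the updated diff (the name `NeedToAdjustAlignment` appears there); it is not the exact final code.

```cpp
const bool NeedToAdjustAlignment = Alignment != getPageSizeCached();
uptr MapSize = Size;
if (LIKELY(!NeedToAdjustAlignment)) {
  if (!MemMap.map(Addr, MapSize, Name, Flags | MAP_NOACCESS))
    return false;
} else {
  // Over-reserve by `Alignment`, then trim the head and the tail so that the
  // surviving range starts at an `Alignment`-aligned address and spans `Size`.
  MapSize += Alignment;
  if (!MemMap.map(Addr, MapSize, Name, Flags | MAP_NOACCESS))
    return false;
  const uptr Offset = MemMap.getBase() % Alignment;
  if (Offset != 0)
    MemMap.unmap(MemMap.getBase(), Alignment - Offset);
  MemMap.unmap(MemMap.getBase() + Size, MemMap.getCapacity() - Size);
}
DCHECK_EQ(MemMap.getBase() % Alignment, 0);
DCHECK_EQ(MemMap.getCapacity(), Size);
```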
return &Array[Size++];
}
// The amount memory used by this allocator is about (NumEntries *
// RegionSize). For example, region with size 256 KB will have 2GB space
region with size 256 KB -> a 256 KB region
Done.
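(For the quoted example to hold, NumEntries would need to be roughly 2 GiB / 256 KiB = 8192 region-info entries; that figure is inferred here for illustration and isn't stated in the patch.)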
RegionInfoLock[ClassId]);
}

// RegionInfos for the same size class will be stored in the order of base |
store in the order of base address -> ordered by base address
Done.
DCHECK_LT(ClassId, NumClasses);
DCHECK(Region->MemMapInfo.MemMap.isAllocated());

// The creation of new region requires holding the MMLock of current |
of new region -> of a new region
of current -> of the current
Done.
sizeof(RegionInfoPointer) * NumClasses);
}

// Scudo requires the data member constant initializable. Array of raw |
Array of raw -> An array of raw
Done.
}

// Scudo requires the data member constant initializable. Array of raw
// pointers doesn't meet the condition. Therefore, wrap the pointer in the |
meet the condition -> meet this condition
Done.
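As an aside, the wrapper pattern referred to in this comment presumably looks something like the sketch below; the definition itself is truncated out of the quoted patch, so only the name `RegionInfoPointer` and the stated motivation are taken from the source.

```cpp
// Hypothetical shape of the wrapper: per the quoted comment, an array of raw
// RegionInfo pointers doesn't satisfy Scudo's constant-initialization
// requirement, so each pointer is wrapped in a small, constant-initializable
// struct instead.
struct RegionInfoPointer {
  RegionInfo *Ptr = nullptr;
};
```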
uptr MapSize = Size;

if (LIKELY(!NeedToAdjustAlignment)) {
if (!MemMap.map(Addr, MapSize, Name, Flags | MAP_NOACCESS)) |
Should this have MAP_ALLOWNOMEM in the flags? Same with the map calls below.

if (Config::getEnableContiguousRegions()) {
ReservedMemoryT ReservedMemory = {};
// Block grouping requires the base address of a Region to be aligned
// with GrouopSize and pointer is compacted according to the offset to the |
GrouopSize -> GroupSize
{
ScopedLock ML(Region->MMLock);
MemMapT MemMap = Region->MemMapInfo.MemMap;
auto RegionInfoIter = RegionInfoManager.getRegionInfoIter(I); |
Seems reasonable to me.
// the compaction is disabled, it relies on the base address to be
// aligned.
const uptr Alignment =
DisablePtrCompaction ? (1UL << GroupSizeLog) : PageSize; |
Is there a way to add a DCHECK that would fail if CompactPtrT is set but pointer compaction is disabled?
for (const auto &It : BG.Batches)
TotalBlocks += It.getCount();
}
auto RegionInfoIter = RegionInfoManager.getRegionInfoIter(I); |
As above, your explanation seems reasonable.
if (conditionVariableEnabled()) {
RegionInfo *Region = RegionInfoManager.getCurRegionInfo(ClassId);

// TODO(chiahungduan): Support multiple-regions with condition variable. |
Similar question: can we add a DCHECK that will fail if the condition variable is enabled but isn't supported? Better if it could be a static_assert to cause a compilation failure.
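A guard along these lines might look like the sketch below. This is suggestion-shaped, not code from the patch: it assumes both predicates can be evaluated in a constant expression; if `conditionVariableEnabled()` can't be, the same condition could become a DCHECK in `init()` instead.

```cpp
// Compile-time version (preferred, per the comment above):
static_assert(!Config::getEnableMultiRegions() || !conditionVariableEnabled(),
              "Condition variables are not supported with EnableMultiRegions "
              "yet");

// Runtime fallback, e.g. at the top of init():
DCHECK(!(Config::getEnableMultiRegions() && conditionVariableEnabled()));
```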
do {
// Note that the tryLock() can fail under certain circumstances. Since
// this should be a rare occurrence, there is no need to do anything to
// force at least one `releaseToOSMaybe()` is called(). |
This is slightly awkward. How about something like: force at least one call to `releaseToOSMaybe()`.