@@ -54,15 +54,15 @@ template <typename Config> class SizeClassAllocator32 {
                 "");
   typedef SizeClassAllocator32<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
-  typedef TransferBatch<ThisT> TransferBatch;
-  typedef BatchGroup<ThisT> BatchGroup;
+  typedef TransferBatch<ThisT> TransferBatchT;
+  typedef BatchGroup<ThisT> BatchGroupT;

-  static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
-                "BatchGroup uses the same class size as TransferBatch");
+  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+                "BatchGroupT uses the same class size as TransferBatchT");

   static uptr getSizeByClassId(uptr ClassId) {
     return (ClassId == SizeClassMap::BatchClassId)
-               ? sizeof(TransferBatch)
+               ? sizeof(TransferBatchT)
                : SizeClassMap::getSizeByClassId(ClassId);
   }

@@ -130,7 +130,7 @@ template <typename Config> class SizeClassAllocator32 {
       SizeClassInfo *Sci = getSizeClassInfo(I);
       ScopedLock L1(Sci->Mutex);
       uptr TotalBlocks = 0;
-      for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
         // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
         BatchClassUsedInFreeLists += BG.Batches.size() + 1;
         for (const auto &It : BG.Batches)
@@ -145,7 +145,7 @@ template <typename Config> class SizeClassAllocator32 {
     SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
     ScopedLock L1(Sci->Mutex);
     uptr TotalBlocks = 0;
-    for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
       if (LIKELY(!BG.Batches.empty())) {
         for (const auto &It : BG.Batches)
           TotalBlocks += It.getCount();
@@ -192,7 +192,7 @@ template <typename Config> class SizeClassAllocator32 {
   }

   u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray) {
-    TransferBatch *B = popBatch(C, ClassId);
+    TransferBatchT *B = popBatch(C, ClassId);
     if (!B)
       return 0;

@@ -206,11 +206,11 @@ template <typename Config> class SizeClassAllocator32 {
     return Count;
   }

-  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+  TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
     ScopedLock L(Sci->Mutex);
-    TransferBatch *B = popBatchImpl(C, ClassId, Sci);
+    TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
     if (UNLIKELY(!B)) {
       if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
         return nullptr;
@@ -400,7 +400,7 @@ template <typename Config> class SizeClassAllocator32 {
   };

   struct BlocksInfo {
-    SinglyLinkedList<BatchGroup> BlockList = {};
+    SinglyLinkedList<BatchGroupT> BlockList = {};
     uptr PoppedBlocks = 0;
     uptr PushedBlocks = 0;
   };
@@ -524,11 +524,11 @@ template <typename Config> class SizeClassAllocator32 {
     // reusable and don't need additional space for them.

     Sci->FreeListInfo.PushedBlocks += Size;
-    BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();

     if (BG == nullptr) {
       // Construct `BatchGroup` on the last element.
-      BG = reinterpret_cast<BatchGroup *>(
+      BG = reinterpret_cast<BatchGroupT *>(
           decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
       --Size;
       BG->Batches.clear();
@@ -553,7 +553,7 @@ template <typename Config> class SizeClassAllocator32 {
     // 2. Only 1 block is pushed when the freelist is empty.
     if (BG->Batches.empty()) {
       // Construct the `TransferBatch` on the last element.
-      TransferBatch *TB = reinterpret_cast<TransferBatch *>(
+      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
           decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
       TB->clear();
       // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
@@ -568,14 +568,14 @@ template <typename Config> class SizeClassAllocator32 {
       BG->Batches.push_front(TB);
     }

-    TransferBatch *CurBatch = BG->Batches.front();
+    TransferBatchT *CurBatch = BG->Batches.front();
     DCHECK_NE(CurBatch, nullptr);

     for (u32 I = 0; I < Size;) {
       u16 UnusedSlots =
           static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
       if (UnusedSlots == 0) {
-        CurBatch = reinterpret_cast<TransferBatch *>(
+        CurBatch = reinterpret_cast<TransferBatchT *>(
             decompactPtr(SizeClassMap::BatchClassId, Array[I]));
         CurBatch->clear();
         // Self-contained
@@ -619,10 +619,11 @@ template <typename Config> class SizeClassAllocator32 {
     DCHECK_GT(Size, 0U);

     auto CreateGroup = [&](uptr CompactPtrGroupBase) {
-      BatchGroup *BG = reinterpret_cast<BatchGroup *>(C->getBatchClassBlock());
+      BatchGroupT *BG =
+          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
       BG->Batches.clear();
-      TransferBatch *TB =
-          reinterpret_cast<TransferBatch *>(C->getBatchClassBlock());
+      TransferBatchT *TB =
+          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
       TB->clear();

       BG->CompactPtrGroupBase = CompactPtrGroupBase;
@@ -634,17 +635,18 @@ template <typename Config> class SizeClassAllocator32 {
       return BG;
     };

-    auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
-      SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
-      TransferBatch *CurBatch = Batches.front();
+    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+      TransferBatchT *CurBatch = Batches.front();
       DCHECK_NE(CurBatch, nullptr);

       for (u32 I = 0; I < Size;) {
         DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
         u16 UnusedSlots =
             static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
         if (UnusedSlots == 0) {
-          CurBatch = reinterpret_cast<TransferBatch *>(C->getBatchClassBlock());
+          CurBatch =
+              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
           CurBatch->clear();
           Batches.push_front(CurBatch);
           UnusedSlots = BG->MaxCachedPerBatch;
@@ -659,11 +661,11 @@ template <typename Config> class SizeClassAllocator32 {
     };

     Sci->FreeListInfo.PushedBlocks += Size;
-    BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();
+    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();

     // In the following, `Cur` always points to the BatchGroup for blocks that
     // will be pushed next. `Prev` is the element right before `Cur`.
-    BatchGroup *Prev = nullptr;
+    BatchGroupT *Prev = nullptr;

     while (Cur != nullptr &&
            compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
@@ -724,36 +726,36 @@ template <typename Config> class SizeClassAllocator32 {
   // group id will be considered first.
   //
   // The region mutex needs to be held while calling this method.
-  TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+  TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
       REQUIRES(Sci->Mutex) {
     if (Sci->FreeListInfo.BlockList.empty())
       return nullptr;

-    SinglyLinkedList<TransferBatch> &Batches =
+    SinglyLinkedList<TransferBatchT> &Batches =
         Sci->FreeListInfo.BlockList.front()->Batches;

     if (Batches.empty()) {
       DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
-      BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
       Sci->FreeListInfo.BlockList.pop_front();

       // Block used by `BatchGroup` is from BatchClassId. Turn the block into
       // `TransferBatch` with single block.
-      TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
+      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
       TB->clear();
       TB->add(
           compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
       Sci->FreeListInfo.PoppedBlocks += 1;
       return TB;
     }

-    TransferBatch *B = Batches.front();
+    TransferBatchT *B = Batches.front();
     Batches.pop_front();
     DCHECK_NE(B, nullptr);
     DCHECK_GT(B->getCount(), 0U);

     if (Batches.empty()) {
-      BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
       Sci->FreeListInfo.BlockList.pop_front();

       // We don't keep BatchGroup with zero blocks to avoid empty-checking while
@@ -805,7 +807,7 @@ template <typename Config> class SizeClassAllocator32 {
     DCHECK_GT(NumberOfBlocks, 0U);

     constexpr u32 ShuffleArraySize =
-        MaxNumBatches * TransferBatch::MaxNumCached;
+        MaxNumBatches * TransferBatchT::MaxNumCached;
     // Fill the transfer batches and put them in the size-class freelist. We
     // need to randomize the blocks for security purposes, so we first fill a
     // local array that we then shuffle before populating the batches.
@@ -1070,7 +1072,7 @@ template <typename Config> class SizeClassAllocator32 {
     auto DecompactPtr = [](CompactPtrT CompactPtr) {
       return reinterpret_cast<uptr>(CompactPtr);
     };
-    for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
       const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
       // The `GroupSize` may not be divided by `BlockSize`, which means there is
       // an unused space at the end of Region. Exclude that space to avoid
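The change above is a mechanical rename: the member typedefs `TransferBatch` and `BatchGroup`, which reused the names of the `TransferBatch<ThisT>` and `BatchGroup<ThisT>` class templates, become `TransferBatchT` and `BatchGroupT`, and every use site is updated. The standalone sketch below (hypothetical `OldStyleAllocator`/`NewStyleAllocator` names, not scudo code, and not necessarily the committer's stated motivation) illustrates the kind of name shadowing the `T` suffix presumably avoids: once an alias reuses the template's own name, the unqualified name inside the class refers to the alias rather than the template.

// Minimal, compilable sketch of the shadowing issue; names are invented.
#include <cstdio>

template <typename AllocatorT> struct TransferBatch {
  static constexpr int MaxNumCached = 8;
};

struct OldStyleAllocator {
  // Old pattern: the alias reuses the template's name. From this point on,
  // the unqualified name `TransferBatch` inside this class means the alias
  // (a concrete type); the template itself is only reachable with extra
  // qualification such as `::TransferBatch<OtherT>`, which is easy to misread.
  typedef TransferBatch<OldStyleAllocator> TransferBatch;
  static constexpr int Slots = TransferBatch::MaxNumCached;
};

struct NewStyleAllocator {
  // New pattern, mirroring the diff: the `T` suffix keeps the alias and the
  // template lexically distinct inside the class.
  typedef TransferBatch<NewStyleAllocator> TransferBatchT;
  static constexpr int Slots = TransferBatchT::MaxNumCached;
};

int main() {
  std::printf("%d %d\n", OldStyleAllocator::Slots, NewStyleAllocator::Slots);
  return 0;
}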