Commit b5e8555

[MemCpyOpt][NFC] Format codebase (#90225)
This patch automatically formats the code.
1 parent 9e30c96
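The commit message does not say which tool produced the formatting. A plausible way to reproduce an equivalent change from an llvm-project checkout, assuming clang-format (or the git-clang-format wrapper) is installed and on PATH:

    # Reformat the file in place using the project's .clang-format rules
    clang-format -i llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

    # Or restrict formatting to the lines changed since the previous commit
    git clang-format HEAD~1 -- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp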

File tree

1 file changed (+62, -57 lines changed)

llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

Lines changed: 62 additions & 57 deletions
@@ -99,7 +99,7 @@ struct MemsetRange {
   MaybeAlign Alignment;
 
   /// TheStores - The actual stores that make up this range.
-  SmallVector<Instruction*, 16> TheStores;
+  SmallVector<Instruction *, 16> TheStores;
 
   bool isProfitableToUseMemset(const DataLayout &DL) const;
 };
@@ -108,10 +108,12 @@ struct MemsetRange {
 
 bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
   // If we found more than 4 stores to merge or 16 bytes, use memset.
-  if (TheStores.size() >= 4 || End-Start >= 16) return true;
+  if (TheStores.size() >= 4 || End - Start >= 16)
+    return true;
 
   // If there is nothing to merge, don't do anything.
-  if (TheStores.size() < 2) return false;
+  if (TheStores.size() < 2)
+    return false;
 
   // If any of the stores are a memset, then it is always good to extend the
   // memset.
@@ -121,7 +123,8 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
 
   // Assume that the code generator is capable of merging pairs of stores
   // together if it wants to.
-  if (TheStores.size() == 2) return false;
+  if (TheStores.size() == 2)
+    return false;
 
   // If we have fewer than 8 stores, it can still be worthwhile to do this.
   // For example, merging 4 i8 stores into an i32 store is useful almost always.
@@ -133,7 +136,7 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
   // the maximum GPR width is the same size as the largest legal integer
   // size. If so, check to see whether we will end up actually reducing the
   // number of stores used.
-  unsigned Bytes = unsigned(End-Start);
+  unsigned Bytes = unsigned(End - Start);
   unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
   if (MaxIntSize == 0)
     MaxIntSize = 1;
@@ -145,7 +148,7 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
   // If we will reduce the # stores (according to this heuristic), do the
   // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
   // etc.
-  return TheStores.size() > NumPointerStores+NumByteStores;
+  return TheStores.size() > NumPointerStores + NumByteStores;
 }
 
 namespace {
@@ -197,7 +200,7 @@ class MemsetRanges {
 /// existing ranges as appropriate.
 void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                             MaybeAlign Alignment, Instruction *Inst) {
-  int64_t End = Start+Size;
+  int64_t End = Start + Size;
 
   range_iterator I = partition_point(
       Ranges, [=](const MemsetRange &O) { return O.End < Start; });
@@ -207,10 +210,10 @@ void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
   // to insert a new range. Handle this now.
   if (I == Ranges.end() || End < I->Start) {
     MemsetRange &R = *Ranges.insert(I, MemsetRange());
-    R.Start        = Start;
-    R.End          = End;
-    R.StartPtr     = Ptr;
-    R.Alignment    = Alignment;
+    R.Start = Start;
+    R.End = End;
+    R.StartPtr = Ptr;
+    R.Alignment = Alignment;
     R.TheStores.push_back(Inst);
     return;
   }
@@ -397,7 +400,8 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
 
     if (auto *NextStore = dyn_cast<StoreInst>(BI)) {
       // If this is a store, see if we can merge it in.
-      if (!NextStore->isSimple()) break;
+      if (!NextStore->isSimple())
+        break;
 
       Value *StoredVal = NextStore->getValueOperand();
 
@@ -460,7 +464,8 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
   // emit memset's for anything big enough to be worthwhile.
   Instruction *AMemSet = nullptr;
   for (const MemsetRange &Range : Ranges) {
-    if (Range.TheStores.size() == 1) continue;
+    if (Range.TheStores.size() == 1)
+      continue;
 
     // If it is profitable to lower this range to memset, do so now.
     if (!Range.isProfitableToUseMemset(DL))
@@ -481,12 +486,10 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
     if (!Range.TheStores.empty())
       AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
 
-    auto *NewDef =
-        cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
-                            ? MSSAU->createMemoryAccessBefore(
-                                  AMemSet, nullptr, MemInsertPoint)
-                            : MSSAU->createMemoryAccessAfter(
-                                  AMemSet, nullptr, MemInsertPoint));
+    auto *NewDef = cast<MemoryDef>(
+        MemInsertPoint->getMemoryInst() == &*BI
+            ? MSSAU->createMemoryAccessBefore(AMemSet, nullptr, MemInsertPoint)
+            : MSSAU->createMemoryAccessAfter(AMemSet, nullptr, MemInsertPoint));
     MSSAU->insertDef(NewDef, /*RenameUses=*/true);
     MemInsertPoint = NewDef;
 
@@ -512,12 +515,13 @@ bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
 
   // Keep track of the arguments of all instruction we plan to lift
   // so we can make sure to lift them as well if appropriate.
-  DenseSet<Instruction*> Args;
+  DenseSet<Instruction *> Args;
   auto AddArg = [&](Value *Arg) {
     auto *I = dyn_cast<Instruction>(Arg);
     if (I && I->getParent() == SI->getParent()) {
       // Cannot hoist user of P above P
-      if (I == P) return false;
+      if (I == P)
+        return false;
       Args.insert(I);
     }
     return true;
@@ -630,8 +634,7 @@ bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
 bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
                                        const DataLayout &DL,
                                        BasicBlock::iterator &BBI) {
-  if (!LI->isSimple() || !LI->hasOneUse() ||
-      LI->getParent() != SI->getParent())
+  if (!LI->isSimple() || !LI->hasOneUse() || LI->getParent() != SI->getParent())
     return false;
 
   auto *T = LI->getType();
@@ -678,21 +681,20 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
     UseMemMove = true;
 
   IRBuilder<> Builder(P);
-  Value *Size = Builder.CreateTypeSize(Builder.getInt64Ty(),
-                                       DL.getTypeStoreSize(T));
+  Value *Size =
+      Builder.CreateTypeSize(Builder.getInt64Ty(), DL.getTypeStoreSize(T));
   Instruction *M;
   if (UseMemMove)
-    M = Builder.CreateMemMove(
-        SI->getPointerOperand(), SI->getAlign(),
-        LI->getPointerOperand(), LI->getAlign(), Size);
+    M = Builder.CreateMemMove(SI->getPointerOperand(), SI->getAlign(),
+                              LI->getPointerOperand(), LI->getAlign(),
+                              Size);
   else
-    M = Builder.CreateMemCpy(
-        SI->getPointerOperand(), SI->getAlign(),
-        LI->getPointerOperand(), LI->getAlign(), Size);
+    M = Builder.CreateMemCpy(SI->getPointerOperand(), SI->getAlign(),
+                             LI->getPointerOperand(), LI->getAlign(), Size);
   M->copyMetadata(*SI, LLVMContext::MD_DIAssignID);
 
-  LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
-                    << *M << "\n");
+  LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => " << *M
+                    << "\n");
 
   auto *LastDef =
       cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
@@ -755,7 +757,8 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
 }
 
 bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
-  if (!SI->isSimple()) return false;
+  if (!SI->isSimple())
+    return false;
 
   // Avoid merging nontemporal stores since the resulting
   // memcpy/memset would not be able to preserve the nontemporal hint.
@@ -794,8 +797,8 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
   // 0xA0A0A0A0 and 0.0.
   auto *V = SI->getOperand(0);
   if (Value *ByteVal = isBytewiseValue(V, DL)) {
-    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
-                                              ByteVal)) {
+    if (Instruction *I =
+            tryMergingIntoMemset(SI, SI->getPointerOperand(), ByteVal)) {
       BBI = I->getIterator(); // Don't invalidate iterator.
       return true;
     }
@@ -816,8 +819,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
     // The newly inserted memset is immediately overwritten by the original
     // store, so we do not need to rename uses.
    auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI));
-    auto *NewAccess = MSSAU->createMemoryAccessBefore(
-        M, nullptr, StoreDef);
+    auto *NewAccess = MSSAU->createMemoryAccessBefore(M, nullptr, StoreDef);
     MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false);
 
     eraseInstruction(SI);
@@ -836,8 +838,8 @@ bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
   // See if there is another memset or store neighboring this memset which
   // allows us to widen out the memset to do a single larger store.
   if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
-    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
-                                              MSI->getValue())) {
+    if (Instruction *I =
+            tryMergingIntoMemset(MSI, MSI->getDest(), MSI->getValue())) {
       BBI = I->getIterator(); // Don't invalidate iterator.
       return true;
     }
@@ -850,7 +852,8 @@ bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
 bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                          Instruction *cpyStore, Value *cpyDest,
                                          Value *cpySrc, TypeSize cpySize,
-                                         Align cpyDestAlign, BatchAAResults &BAA,
+                                         Align cpyDestAlign,
+                                         BatchAAResults &BAA,
                                          std::function<CallInst *()> GetC) {
   // The general transformation to keep in mind is
   //
@@ -898,15 +901,15 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
   if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
     return false;
 
-
   if (C->getParent() != cpyStore->getParent()) {
     LLVM_DEBUG(dbgs() << "Call Slot: block local restriction\n");
     return false;
   }
 
-  MemoryLocation DestLoc = isa<StoreInst>(cpyStore) ?
-      MemoryLocation::get(cpyStore) :
-      MemoryLocation::getForDest(cast<MemCpyInst>(cpyStore));
+  MemoryLocation DestLoc =
+      isa<StoreInst>(cpyStore)
+          ? MemoryLocation::get(cpyStore)
+          : MemoryLocation::getForDest(cast<MemCpyInst>(cpyStore));
 
   // Check that nothing touches the dest of the copy between
   // the call and the store/memcpy.
@@ -1175,7 +1178,8 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
 
   // If all checks passed, then we can transform M.
   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
-                    << *MDep << '\n' << *M << '\n');
+                    << *MDep << '\n'
+                    << *M << '\n');
 
   // TODO: Is this worth it if we're creating a less aligned memcpy? For
   // example we could be moving from movaps -> movq on x86.
@@ -1307,8 +1311,8 @@ bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
   // memcpy's defining access is the memset about to be removed.
   auto *LastDef =
       cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
-  auto *NewAccess = MSSAU->createMemoryAccessBefore(
-      NewMemSet, nullptr, LastDef);
+  auto *NewAccess =
+      MSSAU->createMemoryAccessBefore(NewMemSet, nullptr, LastDef);
   MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
 
   eraseInstruction(MemSet);
@@ -1384,7 +1388,7 @@ bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
     return false;
 
   // A known memcpy size is also required.
-  auto *CCopySize  = dyn_cast<ConstantInt>(CopySize);
+  auto *CCopySize = dyn_cast<ConstantInt>(CopySize);
   if (!CCopySize)
     return false;
   if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
@@ -1655,7 +1659,8 @@ static bool isZeroSize(Value *Size) {
 /// altogether.
 bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
   // We can only optimize non-volatile memcpy's.
-  if (M->isVolatile()) return false;
+  if (M->isVolatile())
+    return false;
 
   // If the source and destination of the memcpy are the same, then zap it.
   if (M->getSource() == M->getDest()) {
@@ -1796,11 +1801,10 @@ bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
                     << "\n");
 
   // If not, then we know we can transform this.
-  Type *ArgTys[3] = { M->getRawDest()->getType(),
-                      M->getRawSource()->getType(),
-                      M->getLength()->getType() };
-  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
-                                                 Intrinsic::memcpy, ArgTys));
+  Type *ArgTys[3] = {M->getRawDest()->getType(), M->getRawSource()->getType(),
+                     M->getLength()->getType()};
+  M->setCalledFunction(
+      Intrinsic::getDeclaration(M->getModule(), Intrinsic::memcpy, ArgTys));
 
   // For MemorySSA nothing really changes (except that memcpy may imply stricter
   // aliasing guarantees).
@@ -1843,7 +1847,8 @@ bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
   // Get the alignment of the byval. If the call doesn't specify the alignment,
   // then it is some target specific value that we can't know.
   MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
-  if (!ByValAlign) return false;
+  if (!ByValAlign)
+    return false;
 
   // If it is greater than the memcpy, then we check to see if we can force the
   // source of the memcpy to the alignment we need. If we fail, we bail out.
@@ -1987,7 +1992,7 @@ bool MemCpyOptPass::iterateOnFunction(Function &F) {
       continue;
 
     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
-        // Avoid invalidating the iterator.
+      // Avoid invalidating the iterator.
       Instruction *I = &*BI++;
 
       bool RepeatInstruction = false;
