-
Notifications
You must be signed in to change notification settings - Fork 13.5k
[Target] Use *Set::insert_range (NFC) #132879
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Target] Use *Set::insert_range (NFC) #132879
Conversation
We can use *Set::insert_range to collapse: for (auto Elem : Range) Set.insert(Elem); down to: Set.insert_range(Range); In some cases, we can further fold that into the set declaration.
@llvm/pr-subscribers-backend-aarch64 @llvm/pr-subscribers-backend-amdgpu Author: Kazu Hirata (kazutakahirata) Changes: We can use *Set::insert_range to collapse: for (auto Elem : Range) Set.insert(Elem); down to: Set.insert_range(Range); In some cases, we can further fold that into the set declaration. Full diff: https://github.com/llvm/llvm-project/pull/132879.diff 14 Files Affected:
diff --git a/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
index e2d10511cccd3..b9feb83339d8d 100644
--- a/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
+++ b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
@@ -72,8 +72,7 @@ bool AArch64BranchTargets::runOnMachineFunction(MachineFunction &MF) {
SmallPtrSet<MachineBasicBlock *, 8> JumpTableTargets;
if (auto *JTI = MF.getJumpTableInfo())
for (auto &JTE : JTI->getJumpTables())
- for (auto *MBB : JTE.MBBs)
- JumpTableTargets.insert(MBB);
+ JumpTableTargets.insert_range(JTE.MBBs);
bool MadeChange = false;
bool HasWinCFI = MF.hasWinCFI();
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index d678dd88e05da..10123c51badb4 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2412,8 +2412,7 @@ class AArch64Operand : public MCParsedAsmOperand {
else {
std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
assert(!Regs.empty() && "Invalid tile or element width!");
- for (auto OutReg : Regs)
- OutRegs.insert(OutReg);
+ OutRegs.insert_range(Regs);
}
}
diff --git a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
index 4db5808c93f50..f8b40b0a1cdfc 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
@@ -1013,9 +1013,7 @@ int R600MachineCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
// We assume a single ExitBlk
MBBVector ExitBlks;
LoopRep->getExitBlocks(ExitBlks);
- SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
- for (MachineBasicBlock *MBB : ExitBlks)
- ExitBlkSet.insert(MBB);
+ SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet(llvm::from_range, ExitBlks);
assert(ExitBlkSet.size() == 1);
MachineBasicBlock *ExitBlk = *ExitBlks.begin();
assert(ExitBlk && "Loop has several exit block");
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 10a8a5272812b..db8e467eedf64 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -120,8 +120,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Collect all globals that had their storage promoted to a constant pool.
// Functions are emitted before variables, so this accumulates promoted
// globals from all functions in PromotedGlobals.
- for (const auto *GV : AFI->getGlobalsPromotedToConstantPool())
- PromotedGlobals.insert(GV);
+ PromotedGlobals.insert_range(AFI->getGlobalsPromotedToConstantPool());
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 6e885ab574cea..475f53fc03399 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -3213,8 +3213,7 @@ void ARMFrameLowering::adjustForSegmentedStacks(
MachineBasicBlock *AddedBlocks[] = {PrevStackMBB, McrMBB, GetMBB, AllocMBB,
PostStackMBB};
- for (MachineBasicBlock *B : AddedBlocks)
- BeforePrologueRegion.insert(B);
+ BeforePrologueRegion.insert_range(AddedBlocks);
for (const auto &LI : PrologueMBB.liveins()) {
for (MachineBasicBlock *PredBB : BeforePrologueRegion)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f99e084d9347c..d2f9ec982ae01 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3139,9 +3139,8 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// Sometimes, no register matches all of these conditions, so we can't do a
// tail-call.
if (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect) {
- SmallSet<MCPhysReg, 5> AddressRegisters;
- for (Register R : {ARM::R0, ARM::R1, ARM::R2, ARM::R3})
- AddressRegisters.insert(R);
+ SmallSet<MCPhysReg, 5> AddressRegisters = {ARM::R0, ARM::R1, ARM::R2,
+ ARM::R3};
if (!(Subtarget->isThumb1Only() ||
MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(true)))
AddressRegisters.insert(ARM::R12);
diff --git a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 1d68185d916f2..0c519d99785d4 100644
--- a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -96,8 +96,7 @@ static void TrackDefUses(MachineInstr *MI, RegisterSet &Defs, RegisterSet &Uses,
auto InsertUsesDefs = [&](RegList &Regs, RegisterSet &UsesDefs) {
for (unsigned Reg : Regs)
- for (MCPhysReg Subreg : TRI->subregs_inclusive(Reg))
- UsesDefs.insert(Subreg);
+ UsesDefs.insert_range(TRI->subregs_inclusive(Reg));
};
InsertUsesDefs(LocalDefs, Defs);
diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp
index 6cba3898edd36..80eedabb0d038 100644
--- a/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -965,8 +965,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) {
Targets.insert(&*Next);
}
} else {
- for (const MachineBasicBlock *SB : B.successors())
- Targets.insert(SB);
+ Targets.insert_range(B.successors());
}
for (const MachineBasicBlock *TB : Targets)
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 92374199b2c91..04aac43a2ed02 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -779,8 +779,7 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
Targets.clear();
LLVM_DEBUG(dbgs() << " failed to evaluate a branch...adding all CFG "
"successors\n");
- for (const MachineBasicBlock *SB : B.successors())
- Targets.insert(SB);
+ Targets.insert_range(B.successors());
}
for (const MachineBasicBlock *TB : Targets) {
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 02e2d670ebee2..3f5e068e65ebe 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -401,8 +401,7 @@ void HexagonExpandCondsets::updateDeadsInRange(Register Reg, LaneBitmask LM,
continue;
if (B == Entry)
return false;
- for (auto *P : B->predecessors())
- Work.insert(P);
+ Work.insert_range(B->predecessors());
}
return true;
};
diff --git a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
index 1a60d0e13057e..ea58e0be748e9 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
@@ -801,9 +801,7 @@ bool HexagonLoadStoreWidening::replaceInsts(InstrGroup &OG, InstrGroup &NG) {
// the insertion point.
// Create a set of all instructions in OG (for quick lookup).
- InstrSet OldMemInsts;
- for (auto *I : OG)
- OldMemInsts.insert(I);
+ InstrSet OldMemInsts(llvm::from_range, OG);
if (Mode == WideningMode::Load) {
// Find the first load instruction in the block that is present in OG.
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 2c8889572a93f..666173b87f9da 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2303,8 +2303,7 @@ bool HexagonLoopIdiomRecognize::processCopyingStore(Loop *CurLoop,
bool HexagonLoopIdiomRecognize::coverLoop(Loop *L,
SmallVectorImpl<Instruction*> &Insts) const {
SmallSet<BasicBlock*,8> LoopBlocks;
- for (auto *B : L->blocks())
- LoopBlocks.insert(B);
+ LoopBlocks.insert_range(L->blocks());
SetVector<Instruction*> Worklist(Insts.begin(), Insts.end());
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 3b8715bce6d5f..acc8c014cb26b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -44,10 +44,7 @@ static cl::list<SPIRV::Capability::Capability>
// Use sets instead of cl::list to check "if contains" condition
struct AvoidCapabilitiesSet {
SmallSet<SPIRV::Capability::Capability, 4> S;
- AvoidCapabilitiesSet() {
- for (auto Cap : AvoidCapabilities)
- S.insert(Cap);
- }
+ AvoidCapabilitiesSet() { S.insert_range(AvoidCapabilities); }
};
char llvm::SPIRVModuleAnalysis::ID = 0;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 459cfdc984f16..4cc456ece77e0 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1041,8 +1041,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
IndirectTerminatedMBBs.insert(&MBB);
// Add all the successors to our target candidates.
- for (MachineBasicBlock *Succ : MBB.successors())
- IndirectTargetMBBs.insert(Succ);
+ IndirectTargetMBBs.insert_range(MBB.successors());
}
// Keep track of the cmov instructions we insert so we can return them.
|
@llvm/pr-subscribers-backend-arm Author: Kazu Hirata (kazutakahirata) Changes: We can use *Set::insert_range to collapse: for (auto Elem : Range) Set.insert(Elem); down to: Set.insert_range(Range); In some cases, we can further fold that into the set declaration. Full diff: https://github.com/llvm/llvm-project/pull/132879.diff 14 Files Affected:
diff --git a/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
index e2d10511cccd3..b9feb83339d8d 100644
--- a/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
+++ b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp
@@ -72,8 +72,7 @@ bool AArch64BranchTargets::runOnMachineFunction(MachineFunction &MF) {
SmallPtrSet<MachineBasicBlock *, 8> JumpTableTargets;
if (auto *JTI = MF.getJumpTableInfo())
for (auto &JTE : JTI->getJumpTables())
- for (auto *MBB : JTE.MBBs)
- JumpTableTargets.insert(MBB);
+ JumpTableTargets.insert_range(JTE.MBBs);
bool MadeChange = false;
bool HasWinCFI = MF.hasWinCFI();
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index d678dd88e05da..10123c51badb4 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2412,8 +2412,7 @@ class AArch64Operand : public MCParsedAsmOperand {
else {
std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
assert(!Regs.empty() && "Invalid tile or element width!");
- for (auto OutReg : Regs)
- OutRegs.insert(OutReg);
+ OutRegs.insert_range(Regs);
}
}
diff --git a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
index 4db5808c93f50..f8b40b0a1cdfc 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
@@ -1013,9 +1013,7 @@ int R600MachineCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
// We assume a single ExitBlk
MBBVector ExitBlks;
LoopRep->getExitBlocks(ExitBlks);
- SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
- for (MachineBasicBlock *MBB : ExitBlks)
- ExitBlkSet.insert(MBB);
+ SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet(llvm::from_range, ExitBlks);
assert(ExitBlkSet.size() == 1);
MachineBasicBlock *ExitBlk = *ExitBlks.begin();
assert(ExitBlk && "Loop has several exit block");
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 10a8a5272812b..db8e467eedf64 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -120,8 +120,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Collect all globals that had their storage promoted to a constant pool.
// Functions are emitted before variables, so this accumulates promoted
// globals from all functions in PromotedGlobals.
- for (const auto *GV : AFI->getGlobalsPromotedToConstantPool())
- PromotedGlobals.insert(GV);
+ PromotedGlobals.insert_range(AFI->getGlobalsPromotedToConstantPool());
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 6e885ab574cea..475f53fc03399 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -3213,8 +3213,7 @@ void ARMFrameLowering::adjustForSegmentedStacks(
MachineBasicBlock *AddedBlocks[] = {PrevStackMBB, McrMBB, GetMBB, AllocMBB,
PostStackMBB};
- for (MachineBasicBlock *B : AddedBlocks)
- BeforePrologueRegion.insert(B);
+ BeforePrologueRegion.insert_range(AddedBlocks);
for (const auto &LI : PrologueMBB.liveins()) {
for (MachineBasicBlock *PredBB : BeforePrologueRegion)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f99e084d9347c..d2f9ec982ae01 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3139,9 +3139,8 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
// Sometimes, no register matches all of these conditions, so we can't do a
// tail-call.
if (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect) {
- SmallSet<MCPhysReg, 5> AddressRegisters;
- for (Register R : {ARM::R0, ARM::R1, ARM::R2, ARM::R3})
- AddressRegisters.insert(R);
+ SmallSet<MCPhysReg, 5> AddressRegisters = {ARM::R0, ARM::R1, ARM::R2,
+ ARM::R3};
if (!(Subtarget->isThumb1Only() ||
MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(true)))
AddressRegisters.insert(ARM::R12);
diff --git a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 1d68185d916f2..0c519d99785d4 100644
--- a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -96,8 +96,7 @@ static void TrackDefUses(MachineInstr *MI, RegisterSet &Defs, RegisterSet &Uses,
auto InsertUsesDefs = [&](RegList &Regs, RegisterSet &UsesDefs) {
for (unsigned Reg : Regs)
- for (MCPhysReg Subreg : TRI->subregs_inclusive(Reg))
- UsesDefs.insert(Subreg);
+ UsesDefs.insert_range(TRI->subregs_inclusive(Reg));
};
InsertUsesDefs(LocalDefs, Defs);
diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp
index 6cba3898edd36..80eedabb0d038 100644
--- a/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -965,8 +965,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) {
Targets.insert(&*Next);
}
} else {
- for (const MachineBasicBlock *SB : B.successors())
- Targets.insert(SB);
+ Targets.insert_range(B.successors());
}
for (const MachineBasicBlock *TB : Targets)
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 92374199b2c91..04aac43a2ed02 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -779,8 +779,7 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
Targets.clear();
LLVM_DEBUG(dbgs() << " failed to evaluate a branch...adding all CFG "
"successors\n");
- for (const MachineBasicBlock *SB : B.successors())
- Targets.insert(SB);
+ Targets.insert_range(B.successors());
}
for (const MachineBasicBlock *TB : Targets) {
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 02e2d670ebee2..3f5e068e65ebe 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -401,8 +401,7 @@ void HexagonExpandCondsets::updateDeadsInRange(Register Reg, LaneBitmask LM,
continue;
if (B == Entry)
return false;
- for (auto *P : B->predecessors())
- Work.insert(P);
+ Work.insert_range(B->predecessors());
}
return true;
};
diff --git a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
index 1a60d0e13057e..ea58e0be748e9 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
@@ -801,9 +801,7 @@ bool HexagonLoadStoreWidening::replaceInsts(InstrGroup &OG, InstrGroup &NG) {
// the insertion point.
// Create a set of all instructions in OG (for quick lookup).
- InstrSet OldMemInsts;
- for (auto *I : OG)
- OldMemInsts.insert(I);
+ InstrSet OldMemInsts(llvm::from_range, OG);
if (Mode == WideningMode::Load) {
// Find the first load instruction in the block that is present in OG.
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 2c8889572a93f..666173b87f9da 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2303,8 +2303,7 @@ bool HexagonLoopIdiomRecognize::processCopyingStore(Loop *CurLoop,
bool HexagonLoopIdiomRecognize::coverLoop(Loop *L,
SmallVectorImpl<Instruction*> &Insts) const {
SmallSet<BasicBlock*,8> LoopBlocks;
- for (auto *B : L->blocks())
- LoopBlocks.insert(B);
+ LoopBlocks.insert_range(L->blocks());
SetVector<Instruction*> Worklist(Insts.begin(), Insts.end());
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 3b8715bce6d5f..acc8c014cb26b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -44,10 +44,7 @@ static cl::list<SPIRV::Capability::Capability>
// Use sets instead of cl::list to check "if contains" condition
struct AvoidCapabilitiesSet {
SmallSet<SPIRV::Capability::Capability, 4> S;
- AvoidCapabilitiesSet() {
- for (auto Cap : AvoidCapabilities)
- S.insert(Cap);
- }
+ AvoidCapabilitiesSet() { S.insert_range(AvoidCapabilities); }
};
char llvm::SPIRVModuleAnalysis::ID = 0;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 459cfdc984f16..4cc456ece77e0 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1041,8 +1041,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
IndirectTerminatedMBBs.insert(&MBB);
// Add all the successors to our target candidates.
- for (MachineBasicBlock *Succ : MBB.successors())
- IndirectTargetMBBs.insert(Succ);
+ IndirectTargetMBBs.insert_range(MBB.successors());
}
// Keep track of the cmov instructions we insert so we can return them.
|
We can use *Set::insert_range to collapse:
for (auto Elem : Range)
Set.insert(Elem);
down to:
Set.insert_range(Range);
In some cases, we can further fold that into the set declaration.