Commit c5dd703

AMDGPU/GlobalISel: AMDGPURegBankSelect
Assign register banks to virtual registers. Does not use the generic RegBankSelect. After register bank selection, all register operands of G_ instructions have an LLT and register banks exclusively. If they had a register class, reassign the appropriate register bank.

Assign register banks using machine uniformity analysis:
Sgpr - uniform values and some lane masks
Vgpr - divergent, non-S1, values
Vcc  - divergent S1 values (lane masks)

AMDGPURegBankSelect does not consider available instructions and, in some cases, G_ instructions with some register bank assignment can't be inst-selected. This is solved in RegBankLegalize.

Exceptions where uniformity analysis does not work:

S32/S64 lane masks:
- need to end up with an sgpr register class after instruction selection
- in most cases uniformity analysis declares them as uniform (forced by tablegen), resulting in the sgpr S32/S64 reg bank
- when uniformity analysis declares them as divergent (some phis), use the intrinsic lane mask analyzer to still assign the sgpr register bank

temporal divergence copy:
- COPY to vgpr with an implicit use of $exec inside of the cycle
- this copy is declared as uniform by uniformity analysis
- make sure that the assigned bank is vgpr

Note: uniformity analysis does not consider that registers with a vgpr def are divergent (you can have a uniform value in a vgpr).
- TODO: the implicit use of $exec could be implemented as an indicator that the instruction is divergent
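The assignment rule above condenses to roughly the following sketch (hedged: pickBankForDef is a hypothetical helper name, not part of the patch; the real pass below also folds in the lane-mask-analyzer and temporal-divergence exceptions):

  // Minimal sketch of the per-def bank choice, assuming the AMDGPU GlobalISel
  // headers added in this patch are available.
  static const RegisterBank *pickBankForDef(Register Reg,
                                            const MachineRegisterInfo &MRI,
                                            const MachineUniformityInfo &MUI,
                                            const RegisterBankInfo &RBI) {
    // Uniform values go to the sgpr bank (the real pass additionally treats
    // S32/S64 lane masks found by IntrinsicLaneMaskAnalyzer as sgpr).
    if (MUI.isUniform(Reg))
      return &RBI.getRegBank(AMDGPU::SGPRRegBankID);
    // Divergent S1 values are lane masks and go to the vcc bank.
    if (MRI.getType(Reg) == LLT::scalar(1))
      return &RBI.getRegBank(AMDGPU::VCCRegBankID);
    // All other divergent values go to the vgpr bank.
    return &RBI.getRegBank(AMDGPU::VGPRRegBankID);
  }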
1 parent 0bec6e8 commit c5dd703

File tree

5 files changed: +961 -661 lines changed


llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp

Lines changed: 37 additions & 0 deletions
@@ -9,11 +9,14 @@
 #include "AMDGPUGlobalISelUtils.h"
 #include "GCNSubtarget.h"
 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGenTypes/LowLevelType.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
 
 using namespace llvm;
+using namespace AMDGPU;
 using namespace MIPatternMatch;
 
 std::pair<Register, unsigned>
@@ -69,3 +72,37 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
 
   return std::pair(Reg, 0);
 }
+
+IntrinsicLaneMaskAnalyzer::IntrinsicLaneMaskAnalyzer(MachineFunction &MF)
+    : MRI(MF.getRegInfo()) {
+  initLaneMaskIntrinsics(MF);
+}
+
+bool IntrinsicLaneMaskAnalyzer::isS32S64LaneMask(Register Reg) const {
+  return S32S64LaneMask.contains(Reg);
+}
+
+void IntrinsicLaneMaskAnalyzer::initLaneMaskIntrinsics(MachineFunction &MF) {
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI);
+      if (GI && GI->is(Intrinsic::amdgcn_if_break)) {
+        S32S64LaneMask.insert(MI.getOperand(3).getReg());
+        findLCSSAPhi(MI.getOperand(0).getReg());
+      }
+
+      if (MI.getOpcode() == AMDGPU::SI_IF ||
+          MI.getOpcode() == AMDGPU::SI_ELSE) {
+        findLCSSAPhi(MI.getOperand(0).getReg());
+      }
+    }
+  }
+}
+
+void IntrinsicLaneMaskAnalyzer::findLCSSAPhi(Register Reg) {
+  S32S64LaneMask.insert(Reg);
+  for (const MachineInstr &LCSSAPhi : MRI.use_instructions(Reg)) {
+    if (LCSSAPhi.isPHI())
+      S32S64LaneMask.insert(LCSSAPhi.getOperand(0).getReg());
+  }
+}

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h

Lines changed: 22 additions & 0 deletions
@@ -9,6 +9,8 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUGLOBALISELUTILS_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUGLOBALISELUTILS_H
 
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/Register.h"
 #include <utility>
 
@@ -26,6 +28,26 @@ std::pair<Register, unsigned>
 getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
                           GISelKnownBits *KnownBits = nullptr,
                           bool CheckNUW = false);
+
+// Currently finds S32/S64 lane masks that can be declared as divergent by
+// uniformity analysis (all are phis at the moment).
+// These are defined as i32/i64 in some IR intrinsics (not as i1).
+// Tablegen forces (by marking lane-mask IR intrinsics as uniform) most
+// S32/S64 lane masks to be uniform, since that makes them end up with an sgpr
+// reg class after instruction-select; we don't search for all of them.
+class IntrinsicLaneMaskAnalyzer {
+  SmallDenseSet<Register, 8> S32S64LaneMask;
+  MachineRegisterInfo &MRI;
+
+public:
+  IntrinsicLaneMaskAnalyzer(MachineFunction &MF);
+  bool isS32S64LaneMask(Register Reg) const;
+
+private:
+  void initLaneMaskIntrinsics(MachineFunction &MF);
+  // This will not be needed when we turn off LCSSA for global-isel.
+  void findLCSSAPhi(Register Reg);
+};
 }
 }
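For reference, a brief usage sketch of the new analyzer (it mirrors how AMDGPURegBankSelect.cpp below consumes it; MF and Reg stand for an existing MachineFunction and virtual register):

  // Build the analyzer once per function, then query registers that machine
  // uniformity analysis may still have declared divergent.
  AMDGPU::IntrinsicLaneMaskAnalyzer ILMA(MF);
  if (ILMA.isS32S64LaneMask(Reg)) {
    // Reg is an S32/S64 lane mask (e.g. an LCSSA phi of amdgcn.if.break):
    // assign it the sgpr register bank even if it was reported as divergent.
  }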

llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp

Lines changed: 214 additions & 0 deletions
@@ -16,12 +16,21 @@
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
+#include "AMDGPUGlobalISelUtils.h"
+#include "GCNSubtarget.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineUniformityAnalysis.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/InitializePasses.h"
 
 #define DEBUG_TYPE "amdgpu-regbankselect"
 
 using namespace llvm;
+using namespace AMDGPU;
 
 namespace {
 
@@ -40,6 +49,9 @@ class AMDGPURegBankSelect : public MachineFunctionPass {
   }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<TargetPassConfig>();
+    AU.addRequired<GISelCSEAnalysisWrapperPass>();
+    AU.addRequired<MachineUniformityAnalysisPass>();
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
@@ -55,6 +67,9 @@ class AMDGPURegBankSelect : public MachineFunctionPass {
 
 INITIALIZE_PASS_BEGIN(AMDGPURegBankSelect, DEBUG_TYPE,
                       "AMDGPU Register Bank Select", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineUniformityAnalysisPass)
 INITIALIZE_PASS_END(AMDGPURegBankSelect, DEBUG_TYPE,
                     "AMDGPU Register Bank Select", false, false)
 
@@ -66,9 +81,208 @@ FunctionPass *llvm::createAMDGPURegBankSelectPass() {
   return new AMDGPURegBankSelect();
 }
 
+class RegBankSelectHelper {
+  MachineIRBuilder &B;
+  MachineRegisterInfo &MRI;
+  AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA;
+  const MachineUniformityInfo &MUI;
+  const SIRegisterInfo &TRI;
+  const RegisterBank *SgprRB;
+  const RegisterBank *VgprRB;
+  const RegisterBank *VccRB;
+
+public:
+  RegBankSelectHelper(MachineIRBuilder &B,
+                      AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA,
+                      const MachineUniformityInfo &MUI,
+                      const SIRegisterInfo &TRI, const RegisterBankInfo &RBI)
+      : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI), TRI(TRI),
+        SgprRB(&RBI.getRegBank(AMDGPU::SGPRRegBankID)),
+        VgprRB(&RBI.getRegBank(AMDGPU::VGPRRegBankID)),
+        VccRB(&RBI.getRegBank(AMDGPU::VCCRegBankID)) {}
+
+  // Temporal divergence copy: COPY to vgpr with an implicit use of $exec
+  // inside of the cycle.
+  // Note: uniformity analysis does not consider that registers with a vgpr
+  // def are divergent (you can have a uniform value in a vgpr).
+  // - TODO: the implicit use of $exec could be implemented as an indicator
+  //   that the instruction is divergent.
+  bool isTemporalDivergenceCopy(Register Reg) {
+    MachineInstr *MI = MRI.getVRegDef(Reg);
+    if (!MI->isCopy() || MI->getNumImplicitOperands() != 1)
+      return false;
+
+    return MI->implicit_operands().begin()->getReg() == TRI.getExec();
+  }
+
+  void setRegBankDef(MachineInstr &MI, MachineOperand &DefOP,
+                     const RegisterBank *RB) {
+    Register Reg = DefOP.getReg();
+
+    if (!MRI.getRegClassOrNull(Reg)) {
+      MRI.setRegBank(Reg, *RB);
+      return;
+    }
+
+    // A register that already has a register class got it during
+    // pre-inst-selection of another instruction. Maybe a cross-bank copy was
+    // required, so we insert a copy that can be removed later. This
+    // simplifies the post-regbanklegalize combiner and avoids the need to
+    // special-case some patterns.
+    LLT Ty = MRI.getType(Reg);
+    Register NewReg = MRI.createVirtualRegister({RB, Ty});
+    DefOP.setReg(NewReg);
+
+    auto &MBB = *MI.getParent();
+    B.setInsertPt(MBB, MBB.SkipPHIsAndLabels(std::next(MI.getIterator())));
+    B.buildCopy(Reg, NewReg);
+
+    // The problem was discovered for a uniform S1 that was used as both a
+    // lane mask (vcc) and a regular sgpr S1.
+    // - The lane-mask (vcc) use was by si_if; this use is divergent and
+    //   requires a non-trivial sgpr-S1-to-vcc copy. But pre-inst-selection of
+    //   si_if sets sreg_64_xexec(S1) on the def of the uniform S1, making it
+    //   a lane mask.
+    // - The regular sgpr S1 (uniform) instruction is now broken since it uses
+    //   sreg_64_xexec(S1), which is divergent.
+
+    // Replace virtual registers that have a register class on uses of generic
+    // instructions with virtual registers that have a register bank.
+    for (auto &UseMI : make_early_inc_range(MRI.use_instructions(Reg))) {
+      if (UseMI.isPreISelOpcode()) {
+        for (MachineOperand &Op : UseMI.operands()) {
+          if (Op.isReg() && Op.getReg() == Reg)
+            Op.setReg(NewReg);
+        }
+      }
+    }
+  }
+
+  Register tryGetVReg(MachineOperand &Op) {
+    if (!Op.isReg())
+      return {};
+
+    Register Reg = Op.getReg();
+    if (!Reg.isVirtual())
+      return {};
+
+    return Reg;
+  }
+
+  void assignBanksOnDefs(MachineInstr &MI) {
+    for (MachineOperand &DefOP : MI.defs()) {
+      Register DefReg = tryGetVReg(DefOP);
+      if (!DefReg.isValid())
+        continue;
+
+      // Copies can have a register class on def registers.
+      if (MI.isCopy() && MRI.getRegClassOrNull(DefReg)) {
+        continue;
+      }
+
+      if (MUI.isUniform(DefReg) || ILMA.isS32S64LaneMask(DefReg)) {
+        setRegBankDef(MI, DefOP, SgprRB);
+      } else {
+        if (MRI.getType(DefReg) == LLT::scalar(1))
+          setRegBankDef(MI, DefOP, VccRB);
+        else
+          setRegBankDef(MI, DefOP, VgprRB);
+      }
+    }
+  }
+
+  void constrainRegBankUse(MachineInstr &MI, MachineOperand &UseOP,
+                           const RegisterBank *RB) {
+    Register Reg = UseOP.getReg();
+
+    LLT Ty = MRI.getType(Reg);
+    Register NewReg = MRI.createVirtualRegister({RB, Ty});
+    UseOP.setReg(NewReg);
+
+    if (MI.isPHI()) {
+      auto DefMI = MRI.getVRegDef(Reg)->getIterator();
+      MachineBasicBlock *DefMBB = DefMI->getParent();
+      B.setInsertPt(*DefMBB, DefMBB->SkipPHIsAndLabels(std::next(DefMI)));
+    } else {
+      B.setInstr(MI);
+    }
+
+    B.buildCopy(NewReg, Reg);
+  }
+
+  void constrainBanksOnUses(MachineInstr &MI) {
+    for (MachineOperand &UseOP : MI.uses()) {
+      auto UseReg = tryGetVReg(UseOP);
+      if (!UseReg.isValid())
+        continue;
+
+      // UseReg already has a register bank.
+      if (MRI.getRegBankOrNull(UseReg))
+        continue;
+
+      if (!isTemporalDivergenceCopy(UseReg) &&
+          (MUI.isUniform(UseReg) || ILMA.isS32S64LaneMask(UseReg))) {
+        constrainRegBankUse(MI, UseOP, SgprRB);
+      } else {
+        if (MRI.getType(UseReg) == LLT::scalar(1))
+          constrainRegBankUse(MI, UseOP, VccRB);
+        else
+          constrainRegBankUse(MI, UseOP, VgprRB);
+      }
+    }
+  }
+};
+
 bool AMDGPURegBankSelect::runOnMachineFunction(MachineFunction &MF) {
   if (MF.getProperties().hasProperty(
           MachineFunctionProperties::Property::FailedISel))
     return false;
+
+  // Set up the instruction builder with CSE.
+  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
+  GISelCSEAnalysisWrapper &Wrapper =
+      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
+  GISelCSEInfo &CSEInfo = Wrapper.get(TPC.getCSEConfig());
+  GISelObserverWrapper Observer;
+  Observer.addObserver(&CSEInfo);
+
+  CSEMIRBuilder B(MF);
+  B.setCSEInfo(&CSEInfo);
+  B.setChangeObserver(Observer);
+
+  RAIIDelegateInstaller DelegateInstaller(MF, &Observer);
+  RAIIMFObserverInstaller MFObserverInstaller(MF, Observer);
+
+  IntrinsicLaneMaskAnalyzer ILMA(MF);
+  MachineUniformityInfo &MUI =
+      getAnalysis<MachineUniformityAnalysisPass>().getUniformityInfo();
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegisterInfo(),
+                                *ST.getRegBankInfo());
+
+  // Assign register banks to ALL def registers on G_ instructions.
+  // Same for copies if they have no register bank or class on the def.
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : MBB) {
+      if (MI.isPreISelOpcode() || MI.isCopy())
+        RBSHelper.assignBanksOnDefs(MI);
+    }
+  }
+
+  // At this point all virtual registers have a register class or bank:
+  // - Defs of G_ instructions have register banks.
+  // - Defs and uses of inst-selected instructions have register classes.
+  // - Defs and uses of copies can have either a register class or a bank,
+  //   and most notably:
+  // - Uses of G_ instructions can have either a register class or a bank.
+
+  // Reassign use registers of G_ instructions to only have register banks.
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : MBB) {
+      // Copies are skipped since they can have a register class on use
+      // registers.
+      if (MI.isPreISelOpcode())
+        RBSHelper.constrainBanksOnUses(MI);
+    }
+  }
+
+  // Defs and uses of G_ instructions now have register banks exclusively.
   return true;
 }
