Skip to content

Commit 6cf16c0

Browse files
AMDGPU/GlobalISel: AMDGPURegBankSelect
Assign register banks to virtual registers. Does not use generic RegBankSelect. After register bank selection all register operand of G_ instructions have LLT and register banks exclusively. If they had register class, reassign appropriate register bank. Assign register banks using machine uniformity analysis: Sgpr - uniform values and some lane masks Vgpr - divergent, non S1, values Vcc - divergent S1 values(lane masks) AMDGPURegBankSelect does not consider available instructions and, in some cases, G_ instructions with some register bank assignment can't be inst-selected. This is solved in RegBankLegalize. Exceptions when uniformity analysis does not work: S32/S64 lane masks: - need to end up with sgpr register class after instruction selection - In most cases Uniformity analysis declares them as uniform (forced by tablegen) resulting in sgpr S32/S64 reg bank - When Uniformity analysis declares them as divergent (some phis), use intrinsic lane mask analyzer to still assign sgpr register bank temporal divergence copy: - COPY to vgpr with implicit use of $exec inside of the cycle - this copy is declared as uniform by uniformity analysis - make sure that assigned bank is vgpr Note: uniformity analysis does not consider that registers with vgpr def are divergent (you can have uniform value in vgpr). - TODO: implicit use of $exec could be implemented as indicator that instruction is divergent
1 parent f153650 commit 6cf16c0

File tree

5 files changed

+945
-661
lines changed

5 files changed

+945
-661
lines changed

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,16 @@
77
//===----------------------------------------------------------------------===//
88

99
#include "AMDGPUGlobalISelUtils.h"
10+
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
1011
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12+
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
1113
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
1214
#include "llvm/CodeGenTypes/LowLevelType.h"
1315
#include "llvm/IR/Constants.h"
16+
#include "llvm/IR/IntrinsicsAMDGPU.h"
1417

1518
using namespace llvm;
19+
using namespace AMDGPU;
1620
using namespace MIPatternMatch;
1721

1822
std::pair<Register, unsigned>
@@ -68,3 +72,37 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
6872

6973
return std::pair(Reg, 0);
7074
}
75+
76+
// Collect, once per function, the S32/S64 virtual registers that hold lane
// masks for control-flow intrinsics, so register bank selection can assign
// them sgpr even when uniformity analysis reports them as divergent.
IntrinsicLaneMaskAnalyzer::IntrinsicLaneMaskAnalyzer(MachineFunction &MF)
    : MRI(MF.getRegInfo()) {
  initLaneMaskIntrinsics(MF);
}

// Returns true if Reg was recorded as an S32/S64 lane mask during the
// function scan in initLaneMaskIntrinsics.
bool IntrinsicLaneMaskAnalyzer::isS32S64LaneMask(Register Reg) const {
  return S32S64LaneMask.contains(Reg);
}

// Walk every instruction in MF and record lane-mask registers:
// - G_INTRINSIC amdgcn.if.break: operand 3 (the mask argument) and the
//   def in operand 0 (plus any phis fed by it).
// - SI_IF / SI_ELSE: the mask def in operand 0 (plus any phis fed by it).
void IntrinsicLaneMaskAnalyzer::initLaneMaskIntrinsics(MachineFunction &MF) {
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI);
      if (GI && GI->is(Intrinsic::amdgcn_if_break)) {
        // Operand layout: 0 = def, 1 = intrinsic ID, 2+ = arguments;
        // operand 3 is the incoming loop mask argument.
        S32S64LaneMask.insert(MI.getOperand(3).getReg());
        findLCSSAPhi(MI.getOperand(0).getReg());
      }

      if (MI.getOpcode() == AMDGPU::SI_IF ||
          MI.getOpcode() == AMDGPU::SI_ELSE) {
        findLCSSAPhi(MI.getOperand(0).getReg());
      }
    }
  }
}

// Record Reg, and additionally the defs of any phis that use Reg — these are
// presumably LCSSA phis forwarding the lane mask out of the loop, and must
// stay in the sgpr bank as well.
// This will not be needed when we turn off LCSSA for global-isel.
void IntrinsicLaneMaskAnalyzer::findLCSSAPhi(Register Reg) {
  S32S64LaneMask.insert(Reg);
  for (const MachineInstr &LCSSAPhi : MRI.use_instructions(Reg)) {
    if (LCSSAPhi.isPHI())
      S32S64LaneMask.insert(LCSSAPhi.getOperand(0).getReg());
  }
}

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@
99
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUGLOBALISELUTILS_H
1010
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUGLOBALISELUTILS_H
1111

12+
#include "llvm/ADT/DenseSet.h"
13+
#include "llvm/CodeGen/MachineFunction.h"
1214
#include "llvm/CodeGen/Register.h"
1315
#include <utility>
1416

@@ -26,6 +28,26 @@ std::pair<Register, unsigned>
2628
getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
2729
GISelKnownBits *KnownBits = nullptr,
2830
bool CheckNUW = false);
31+
32+
// Currently finds S32/S64 lane masks that can be declared as divergent by
33+
// uniformity analysis (all are phis at the moment).
34+
// These are defined as i32/i64 in some IR intrinsics (not as i1).
35+
// Tablegen forces(via telling that lane mask IR intrinsics are uniform) most of
36+
// S32/S64 lane masks to be uniform, as this results in them ending up with sgpr
37+
// reg class after instruction-select don't search for all of them.
38+
class IntrinsicLaneMaskAnalyzer {
39+
SmallDenseSet<Register, 8> S32S64LaneMask;
40+
MachineRegisterInfo &MRI;
41+
42+
public:
43+
IntrinsicLaneMaskAnalyzer(MachineFunction &MF);
44+
bool isS32S64LaneMask(Register Reg) const;
45+
46+
private:
47+
void initLaneMaskIntrinsics(MachineFunction &MF);
48+
// This will not be needed when we turn off LCSSA for global-isel.
49+
void findLCSSAPhi(Register Reg);
50+
};
2951
}
3052
}
3153

llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp

Lines changed: 197 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,21 @@
1616
//===----------------------------------------------------------------------===//
1717

1818
#include "AMDGPU.h"
19+
#include "AMDGPUGlobalISelUtils.h"
20+
#include "GCNSubtarget.h"
21+
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
22+
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
23+
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
1924
#include "llvm/CodeGen/MachineFunctionPass.h"
25+
#include "llvm/CodeGen/MachineInstr.h"
26+
#include "llvm/CodeGen/MachineUniformityAnalysis.h"
27+
#include "llvm/CodeGen/TargetPassConfig.h"
2028
#include "llvm/InitializePasses.h"
2129

2230
#define DEBUG_TYPE "amdgpu-regbankselect"
2331

2432
using namespace llvm;
33+
using namespace AMDGPU;
2534

2635
namespace {
2736

@@ -40,6 +49,9 @@ class AMDGPURegBankSelect : public MachineFunctionPass {
4049
}
4150

4251
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // TargetPassConfig supplies the CSE config for the CSE builder used in
    // runOnMachineFunction.
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<GISelCSEAnalysisWrapperPass>();
    // Register banks are assigned based on machine uniformity analysis.
    AU.addRequired<MachineUniformityAnalysisPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
4557

@@ -55,6 +67,9 @@ class AMDGPURegBankSelect : public MachineFunctionPass {
5567

5668
INITIALIZE_PASS_BEGIN(AMDGPURegBankSelect, DEBUG_TYPE,
5769
"AMDGPU Register Bank Select", false, false)
70+
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
71+
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
72+
INITIALIZE_PASS_DEPENDENCY(MachineUniformityAnalysisPass)
5873
INITIALIZE_PASS_END(AMDGPURegBankSelect, DEBUG_TYPE,
5974
"AMDGPU Register Bank Select", false, false)
6075

@@ -66,9 +81,191 @@ FunctionPass *llvm::createAMDGPURegBankSelectPass() {
6681
return new AMDGPURegBankSelect();
6782
}
6883

84+
// Helper that assigns register banks (sgpr/vgpr/vcc) to the def and use
// operands of generic (G_) instructions, based on machine uniformity
// analysis and the intrinsic lane mask analyzer. Inserts cross-bank/class
// copies where a register already carries a register class.
class RegBankSelectHelper {
  MachineIRBuilder &B;
  MachineRegisterInfo &MRI;
  AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA;
  const MachineUniformityInfo &MUI;
  // Cached pointers to the three AMDGPU register banks.
  const RegisterBank *SgprRB;
  const RegisterBank *VgprRB;
  const RegisterBank *VccRB;

public:
  RegBankSelectHelper(MachineIRBuilder &B,
                      AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA,
                      const MachineUniformityInfo &MUI,
                      const RegisterBankInfo &RBI)
      : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI),
        SgprRB(&RBI.getRegBank(AMDGPU::SGPRRegBankID)),
        VgprRB(&RBI.getRegBank(AMDGPU::VGPRRegBankID)),
        VccRB(&RBI.getRegBank(AMDGPU::VCCRegBankID)) {}

  // Give the def operand DefOP of MI register bank RB. If the def register
  // already has a register class (set by pre-inst-selection of another
  // instruction), rewrite the def to a fresh bank-typed register and insert
  // a COPY back to the original class-typed register.
  void setRegBankDef(MachineInstr &MI, MachineOperand &DefOP,
                     const RegisterBank *RB) {
    Register Reg = DefOP.getReg();

    // Common case: no register class yet — just attach the bank.
    if (!MRI.getRegClassOrNull(Reg)) {
      MRI.setRegBank(Reg, *RB);
      return;
    }

    // Register that already has Register class got it during pre-inst selection
    // of another instruction. Maybe cross bank copy was required so we insert a
    // copy that can be removed later. This simplifies post regbanklegalize
    // combiner and avoids need to special case some patterns.
    LLT Ty = MRI.getType(Reg);
    Register NewReg = MRI.createVirtualRegister({RB, Ty});
    DefOP.setReg(NewReg);

    // Insert the class-preserving copy right after MI (past any phis/labels).
    auto &MBB = *MI.getParent();
    B.setInsertPt(MBB, MBB.SkipPHIsAndLabels(std::next(MI.getIterator())));
    B.buildCopy(Reg, NewReg);

    // The problem was discovered for uniform S1 that was used as both
    // lane mask(vcc) and regular sgpr S1.
    // - lane-mask(vcc) use was by si_if, this use is divergent and requires
    //   non-trivial sgpr-S1-to-vcc copy. But pre-inst-selection of si_if sets
    //   sreg_64_xexec(S1) on def of uniform S1 making it lane-mask.
    // - the regular sgpr S1(uniform) instruction is now broken since
    //   it uses sreg_64_xexec(S1) which is divergent.

    // Replace virtual registers with register class on generic instructions
    // uses with virtual registers with register bank.
    for (auto &UseMI : make_early_inc_range(MRI.use_instructions(Reg))) {
      if (UseMI.isPreISelOpcode()) {
        for (MachineOperand &Op : UseMI.operands()) {
          if (Op.isReg() && Op.getReg() == Reg)
            Op.setReg(NewReg);
        }
      }
    }
  }

  // Returns Op's register if Op is a virtual register operand; otherwise an
  // invalid Register.
  Register tryGetVReg(MachineOperand &Op) {
    if (!Op.isReg())
      return {};

    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return {};

    return Reg;
  }

  // Assign a register bank to each virtual def of MI: sgpr for uniform
  // values and S32/S64 lane masks, vcc for divergent S1, vgpr otherwise.
  void assignBanksOnDefs(MachineInstr &MI) {
    for (MachineOperand &DefOP : MI.defs()) {
      Register DefReg = tryGetVReg(DefOP);
      if (!DefReg.isValid())
        continue;

      // Copies can have register class on def registers.
      if (MI.isCopy() && MRI.getRegClassOrNull(DefReg)) {
        continue;
      }

      if (MUI.isUniform(DefReg) || ILMA.isS32S64LaneMask(DefReg)) {
        setRegBankDef(MI, DefOP, SgprRB);
      } else {
        if (MRI.getType(DefReg) == LLT::scalar(1))
          setRegBankDef(MI, DefOP, VccRB);
        else
          setRegBankDef(MI, DefOP, VgprRB);
      }
    }
  }

  // Rewrite use operand UseOP of MI to a fresh register with bank RB and
  // insert a COPY from the old register. For phi uses the copy must go into
  // the predecessor's def block, not in front of the phi.
  void constrainRegBankUse(MachineInstr &MI, MachineOperand &UseOP,
                           const RegisterBank *RB) {
    Register Reg = UseOP.getReg();

    LLT Ty = MRI.getType(Reg);
    Register NewReg = MRI.createVirtualRegister({RB, Ty});
    UseOP.setReg(NewReg);

    if (MI.isPHI()) {
      // Place the copy right after the defining instruction in its block so
      // the phi operand is available on the incoming edge.
      auto DefMI = MRI.getVRegDef(Reg)->getIterator();
      MachineBasicBlock *DefMBB = DefMI->getParent();
      B.setInsertPt(*DefMBB, DefMBB->SkipPHIsAndLabels(std::next(DefMI)));
    } else {
      B.setInstr(MI);
    }

    B.buildCopy(NewReg, Reg);
  }

  // Ensure every virtual use of MI carries a register bank: uses that still
  // have only a register class get a bank-typed copy via constrainRegBankUse.
  void constrainBanksOnUses(MachineInstr &MI) {
    for (MachineOperand &UseOP : MI.uses()) {
      auto UseReg = tryGetVReg(UseOP);
      if (!UseReg.isValid())
        continue;

      // UseReg already has register bank.
      if (MRI.getRegBankOrNull(UseReg))
        continue;

      if (MUI.isUniform(UseReg) || ILMA.isS32S64LaneMask(UseReg)) {
        constrainRegBankUse(MI, UseOP, SgprRB);
      } else {
        if (MRI.getType(UseReg) == LLT::scalar(1))
          constrainRegBankUse(MI, UseOP, VccRB);
        else
          constrainRegBankUse(MI, UseOP, VgprRB);
      }
    }
  }
};
217+
69218
// Pass entry point: assign register banks to all virtual registers used by
// generic (G_) instructions, in two sweeps over the function (defs first,
// then uses).
bool AMDGPURegBankSelect::runOnMachineFunction(MachineFunction &MF) {
  // Bail out if an earlier GlobalISel stage already failed on this function.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  // Setup the instruction builder with CSE.
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  GISelCSEInfo &CSEInfo = Wrapper.get(TPC.getCSEConfig());
  GISelObserverWrapper Observer;
  Observer.addObserver(&CSEInfo);

  CSEMIRBuilder B(MF);
  B.setCSEInfo(&CSEInfo);
  B.setChangeObserver(Observer);

  // Keep the CSE info up to date for every instruction created/changed
  // while this pass runs (RAII: uninstalled on return).
  RAIIDelegateInstaller DelegateInstaller(MF, &Observer);
  RAIIMFObserverInstaller MFObserverInstaller(MF, Observer);

  IntrinsicLaneMaskAnalyzer ILMA(MF);
  MachineUniformityInfo &MUI =
      getAnalysis<MachineUniformityAnalysisPass>().getUniformityInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegBankInfo());

  // Assign register banks to ALL def registers on G_ instructions.
  // Same for copies if they have no register bank or class on def.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isPreISelOpcode() || MI.isCopy())
        RBSHelper.assignBanksOnDefs(MI);
    }
  }

  // At this point all virtual registers have register class or bank
  // - Defs of G_ instructions have register banks.
  // - Defs and uses of inst-selected instructions have register class.
  // - Defs and uses of copies can have either register class or bank
  // and most notably:
  // - Uses of G_ instructions can have either register class or bank.

  // Reassign use registers of G_ instructions to only have register banks.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // Copies are skipped since they can have register class on use registers.
      if (MI.isPreISelOpcode())
        RBSHelper.constrainBanksOnUses(MI);
    }
  }

  // Defs and uses of G_ instructions have register banks exclusively.
  return true;
}

0 commit comments

Comments
 (0)