[AMDGPU] Fold uniform readfirstlane + cndmask #70188

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account


Closed · wants to merge 4 commits · showing changes from 1 commit

88 changes: 88 additions & 0 deletions llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -104,6 +104,7 @@ class SIFoldOperands : public MachineFunctionPass {
bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
bool tryFoldFoldableCopy(MachineInstr &MI,
MachineOperand *&CurrentKnownM0Val) const;
bool tryFoldUniformReadFirstLaneCndMask(MachineInstr &MI) const;

const MachineOperand *isClamp(const MachineInstr &MI) const;
bool tryFoldClamp(MachineInstr &MI);
@@ -1400,6 +1401,88 @@ bool SIFoldOperands::tryFoldFoldableCopy(
return Changed;
}

// Try to fold the following pattern:
// s_cselect s[2:3], K, 0 ; K has LSB set. Usually it's +-1.
// v_cndmask v0, 0, +-1, s[2:3]
// v_readfirstlane s0, v0
//
// into (for example)
//
// s_cselect s[2:3], K, 0
// s_bfe_u32 s0, s2, 0x10000
bool SIFoldOperands::tryFoldUniformReadFirstLaneCndMask(
MachineInstr &MI) const {
if (MI.getOpcode() != AMDGPU::V_READFIRSTLANE_B32)
return false;

MachineInstr *RFLSrc = MRI->getVRegDef(MI.getOperand(1).getReg());
// We can also have the following pattern:
//
// %2:vreg_64 = REG_SEQUENCE %X:vgpr_32, sub0, %Y:sreg_32, sub1
// %3:sgpr_32 = V_READFIRSTLANE_B32 %2.sub0:vreg_64
//
// In this case we dig into %X or %Y depending on which sub register
// the V_READFIRSTLANE accesses.
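// For reference: a two-element REG_SEQUENCE has the operand layout
//   (dst, reg0, subidx0, reg1, subidx1),
// which is why operands 2 and 4 below are the sub-register indices and
// operands 1 and 3 the corresponding source registers.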
if (RFLSrc->isRegSequence()) {
unsigned RFLSubReg = MI.getOperand(1).getSubReg();
if (RFLSrc->getNumOperands() != 5)
return false;

if (RFLSrc->getOperand(2).getImm() == RFLSubReg)
RFLSrc = MRI->getVRegDef(RFLSrc->getOperand(1).getReg());
else if (RFLSrc->getOperand(4).getImm() == RFLSubReg)
RFLSrc = MRI->getVRegDef(RFLSrc->getOperand(3).getReg());
else
return false;
}

// Need the e64 form so the condition mask is an explicit SGPR operand.
if (!RFLSrc || RFLSrc->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
return false;

MachineOperand *Src0 = TII->getNamedOperand(*RFLSrc, AMDGPU::OpName::src0);
MachineOperand *Src1 = TII->getNamedOperand(*RFLSrc, AMDGPU::OpName::src1);
Register Src2 = TII->getNamedOperand(*RFLSrc, AMDGPU::OpName::src2)->getReg();

if (!Src0->isImm() || Src0->getImm() != 0 || !Src1->isImm())
return false;

// This pattern usually comes from a zext/sext; sext uses -1.
bool IsSigned = false;
if (Src1->getImm() == -1)
IsSigned = true;
else if (Src1->getImm() != 1)
return false;
Contributor:

    IsSigned = Src1->getImm() == -1

Contributor (author):

    This also handles rejecting Src1 != -1/1.

Contributor:

    But you can check that after: if != 1 && !IsSigned.
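A minimal sketch of the restructure being suggested (hypothetical; not part of this diff):

    // Derive the signedness directly from src1, then reject anything
    // that is neither 1 nor -1 in a single follow-up check.
    bool IsSigned = Src1->getImm() == -1;
    if (Src1->getImm() != 1 && !IsSigned)
      return false;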

MachineInstr *CSel = MRI->getVRegDef(Src2);
if (!CSel || (CSel->getOpcode() != AMDGPU::S_CSELECT_B32 &&
CSel->getOpcode() != AMDGPU::S_CSELECT_B64))
return false;

MachineOperand *CSelSrc0 = TII->getNamedOperand(*CSel, AMDGPU::OpName::src0);
MachineOperand *CSelSrc1 = TII->getNamedOperand(*CSel, AMDGPU::OpName::src1);
// Note: we could also allow any non-zero value for CSelSrc0, and adapt the
// BFE's mask depending on where the first set bit is.
if (!CSelSrc0->isImm() || (CSelSrc0->getImm() & 1) == 0 ||
!CSelSrc1->isImm() || CSelSrc1->getImm() != 0)
return false;

// Replace the V_CNDMASK with S_BFE.
unsigned BFEOpc = (IsSigned ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32);

// If the CSELECT writes to a 64 bit SGPR, only pick the low bits.
unsigned SubReg = 0;
if (CSel->getOpcode() == AMDGPU::S_CSELECT_B64)
SubReg = AMDGPU::sub0;

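// The S_BFE immediate packs the field offset in its low bits and the field
// width in bits [22:16]; 0x10000 therefore extracts the single bit at
// offset 0 (the LSB).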
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(BFEOpc),
MI.getOperand(0).getReg())
.addReg(Src2, /*Flags*/ 0, SubReg)
.addImm(0x10000);
MI.eraseFromParent();
return true;
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
@@ -2087,6 +2170,11 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
continue;
}

if (tryFoldUniformReadFirstLaneCndMask(MI)) {
Changed = true;
continue;
}

// Saw an unknown clobber of m0, so we no longer know what it is.
if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
CurrentKnownM0Val = nullptr;
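For illustration, a hand-written MIR fragment of the shape this fold targets (virtual register names and classes are made up for the example; the S_CSELECT_B64 form is shown):

    %mask:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit $scc
    %sel:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %mask, implicit $exec
    %out:sgpr_32 = V_READFIRSTLANE_B32 %sel, implicit $exec

which the fold rewrites to:

    %mask:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit $scc
    %out:sgpr_32 = S_BFE_U32 %mask.sub0, 65536, implicit-def $scc

Here 65536 is 0x10000; because src1 of the V_CNDMASK is 1 rather than -1, the unsigned S_BFE_U32 is chosen.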
77 changes: 36 additions & 41 deletions llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -1536,8 +1536,7 @@ define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1)
; SI-NEXT: s_or_b32 s2, s5, s2
; SI-NEXT: s_cmp_lg_u32 s2, 0
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; SI-NEXT: v_readfirstlane_b32 s2, v1
; SI-NEXT: s_bfe_u32 s2, s4, 0x10000
; SI-NEXT: s_bfe_u32 s5, s3, 0xb0014
; SI-NEXT: s_or_b32 s2, s6, s2
; SI-NEXT: s_sub_i32 s6, 0x3f1, s5
@@ -1599,8 +1598,7 @@ define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1)
; VI-NEXT: s_or_b32 s0, s1, s6
; VI-NEXT: s_cmp_lg_u32 s0, 0
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_readfirstlane_b32 s0, v2
; VI-NEXT: s_bfe_u32 s0, s0, 0x10000
; VI-NEXT: s_bfe_u32 s1, s7, 0xb0014
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_or_b32 s4, s2, s0
@@ -1661,8 +1659,7 @@ define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1)
; GFX9-NEXT: s_or_b32 s0, s1, s6
; GFX9-NEXT: s_cmp_lg_u32 s0, 0
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
; GFX9-NEXT: s_bfe_u32 s0, s0, 0x10000
; GFX9-NEXT: s_bfe_u32 s1, s7, 0xb0014
; GFX9-NEXT: s_or_b32 s6, s2, s0
; GFX9-NEXT: s_sub_i32 s2, 0x3f1, s1
@@ -1714,63 +1711,61 @@ define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1)
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x34
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s1, s7, 0x1ff
; GFX11-NEXT: s_lshr_b32 s2, s7, 8
; GFX11-NEXT: s_or_b32 s1, s1, s6
; GFX11-NEXT: s_and_b32 s2, s2, 0xffe
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_cselect_b32 s1, -1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
; GFX11-NEXT: s_bfe_u32 s1, s7, 0xb0014
; GFX11-NEXT: s_sub_i32 s3, 0x3f1, s1
; GFX11-NEXT: s_addk_i32 s1, 0xfc10
; GFX11-NEXT: v_med3_i32 v1, s3, 0, 13
; GFX11-NEXT: v_readfirstlane_b32 s3, v0
; GFX11-NEXT: s_lshl_b32 s8, s1, 12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_or_b32 s3, s2, 0x1000
; GFX11-NEXT: s_or_b32 s8, s2, s8
; GFX11-NEXT: s_lshr_b32 s6, s3, s6
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_lshlrev_b32_e64 v0, v1, s6
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, s3, v0
; GFX11-NEXT: s_bfe_u32 s3, s7, 0xb0014
; GFX11-NEXT: s_bfe_u32 s1, s1, 0x10000
; GFX11-NEXT: s_sub_i32 s6, 0x3f1, s3
; GFX11-NEXT: s_or_b32 s1, s2, s1
; GFX11-NEXT: v_med3_i32 v0, s6, 0, 13
; GFX11-NEXT: s_or_b32 s2, s1, 0x1000
; GFX11-NEXT: s_addk_i32 s3, 0xfc10
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_lshl_b32 s8, s3, 12
; GFX11-NEXT: v_readfirstlane_b32 s6, v0
; GFX11-NEXT: s_or_b32 s8, s1, s8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_lshr_b32 s6, s2, s6
; GFX11-NEXT: v_lshlrev_b32_e64 v0, v0, s6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, s2, v0
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_readfirstlane_b32 s3, v0
; GFX11-NEXT: s_or_b32 s3, s6, s3
; GFX11-NEXT: s_cmp_lt_i32 s1, 1
; GFX11-NEXT: s_cselect_b32 s3, s3, s8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s6, s3, 7
; GFX11-NEXT: v_readfirstlane_b32 s2, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s6, s2
; GFX11-NEXT: s_cmp_lt_i32 s3, 1
; GFX11-NEXT: s_cselect_b32 s2, s2, s8
; GFX11-NEXT: s_and_b32 s6, s2, 7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_cmp_gt_i32 s6, 5
; GFX11-NEXT: s_cselect_b32 s8, -1, 0
; GFX11-NEXT: s_cmp_eq_u32 s6, 3
; GFX11-NEXT: s_cselect_b32 s6, -1, 0
; GFX11-NEXT: s_lshr_b32 s3, s3, 2
; GFX11-NEXT: s_lshr_b32 s2, s2, 2
; GFX11-NEXT: s_or_b32 s6, s6, s8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_cmp_lt_i32 s1, 31
; GFX11-NEXT: s_cselect_b32 s3, s3, 0x7c00
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_cselect_b32 s2, -1, 0
; GFX11-NEXT: s_cmpk_eq_i32 s1, 0x40f
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
; GFX11-NEXT: s_addc_u32 s2, s2, 0
; GFX11-NEXT: s_cmp_lt_i32 s3, 31
; GFX11-NEXT: s_cselect_b32 s2, s2, 0x7c00
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_cselect_b32 s1, -1, 0
; GFX11-NEXT: s_cmpk_eq_i32 s3, 0x40f
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
; GFX11-NEXT: s_cselect_b32 vcc_lo, -1, 0
; GFX11-NEXT: s_lshr_b32 s1, s7, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_and_b32 s1, s1, 0x8000
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 9, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, 0x7c00, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, s3, v0, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e32 v0, s2, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, s1, v0
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, s0