[AMDGPU] - Add s_quadmask intrinsics #70804

Merged · 2 commits · Nov 2, 2023

5 changes: 5 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1932,6 +1932,11 @@ def int_amdgcn_inverse_ballot :
def int_amdgcn_s_bitreplicate :
DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// Lowers to S_QUADMASK_B{32,64}
// The argument must be uniform; otherwise, the result is undefined.
def int_amdgcn_s_quadmask :
DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem, IntrConvergent]>;

class AMDGPUWaveReduce<LLVMType data_ty = llvm_anyint_ty> : Intrinsic<
[data_ty],
[
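
As background for the definition above (not part of the patch): S_QUADMASK sets result bit i when any bit in the i-th four-bit group ("quad") of the source is set, so a 32-bit source yields at most 8 significant result bits and a 64-bit source at most 16. A minimal C++ model of that behavior, assuming the standard ISA semantics:

#include <cstdint>

// Reference sketch of the quadmask operation the intrinsic lowers to:
// result bit i is 1 iff any of input bits [4*i, 4*i + 3] is 1.
template <typename T> T quadmask(T mask) {
  T result = 0;
  for (unsigned i = 0; i < sizeof(T) * 8 / 4; ++i)
    if ((mask >> (4 * i)) & T(0xF))
      result |= T(1) << i;
  return result;
}

// Example: quadmask<uint32_t>(0x85fe3a92u) == 0xff, since every nibble of
// the input has at least one set bit.
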
9 changes: 9 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2995,6 +2995,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
return;
case Intrinsic::amdgcn_inverse_ballot:
case Intrinsic::amdgcn_s_bitreplicate:
case Intrinsic::amdgcn_s_quadmask:
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(B, MI, 2); // Mask
return;
@@ -4537,6 +4538,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[2] = AMDGPU::getValueMapping(MaskBank, MaskSize);
break;
}
case Intrinsic::amdgcn_s_quadmask: {
Register MaskReg = MI.getOperand(2).getReg();
unsigned MaskSize = MRI.getType(MaskReg).getSizeInBits();
unsigned MaskBank = getRegBankID(MaskReg, MRI, AMDGPU::SGPRRegBankID);
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, MaskSize);
OpdsMapping[2] = AMDGPU::getValueMapping(MaskBank, MaskSize);
break;
}
case Intrinsic::amdgcn_wave_reduce_umin:
case Intrinsic::amdgcn_wave_reduce_umax: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
6 changes: 4 additions & 2 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6473,8 +6473,10 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI,
return CreatedBB;
}

// Legalize S_BITREPLICATE
if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32) {
// Legalize S_BITREPLICATE and S_QUADMASK
if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
MI.getOpcode() == AMDGPU::S_QUADMASK_B64) {
MachineOperand &Src = MI.getOperand(1);
if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
6 changes: 4 additions & 2 deletions llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -326,8 +326,10 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">;

} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]

def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32">;
def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64">;
def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32",
[(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>;
def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64",
[(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>;

let Uses = [M0] in {
def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
90 changes: 90 additions & 0 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -0,0 +1,90 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s

declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)

define i32 @test_quadmask_constant_i32() {
; GFX11-LABEL: test_quadmask_constant_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, 0x85fe3a92

Contributor review comment on the line above: "Could constant fold this at the IR level, as a follow-up." (A sketch of such a fold is given after the test listing at the end of this page.)

; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 u0x85FE3A92)
ret i32 %qm
}

define amdgpu_cs void @test_quadmask_sgpr_i32(i32 inreg %mask, ptr addrspace(1) %out) {
; GFX11-LABEL: test_quadmask_sgpr_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
store i32 %qm, ptr addrspace(1) %out
ret void
}


define i32 @test_quadmask_vgpr_i32(i32 %mask) {
; GFX11-LABEL: test_quadmask_vgpr_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
ret i32 %qm
}

define i64 @test_quadmask_constant_i64() {
; GFX11-LABEL: test_quadmask_constant_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s0, 0x85fe3a92
; GFX11-NEXT: s_mov_b32 s1, 0x67de48fc
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 u0x67DE48FC85FE3A92)
ret i64 %qm
}

define amdgpu_cs void @test_quadmask_sgpr_i64(i64 inreg %mask, ptr addrspace(1) %out) {
; GFX11-LABEL: test_quadmask_sgpr_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
store i64 %qm, ptr addrspace(1) %out
ret void
}

define i64 @test_quadmask_vgpr_i64(i64 %mask) {
; GFX11-LABEL: test_quadmask_vgpr_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: v_readfirstlane_b32 s1, v1
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
ret i64 %qm
}
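
Regarding the review comment above about folding constant arguments at the IR level: a hypothetical helper (names illustrative, not from this patch or from an existing LLVM fold) could compute the quadmask of a constant mask directly, for example with APInt:

#include "llvm/ADT/APInt.h"
using namespace llvm;

// Hypothetical follow-up sketch: evaluate llvm.amdgcn.s.quadmask on a
// constant operand at compile time instead of emitting s_quadmask_b{32,64}.
static APInt foldQuadmask(const APInt &Mask) {
  APInt Result = APInt::getZero(Mask.getBitWidth());
  for (unsigned I = 0, E = Mask.getBitWidth() / 4; I != E; ++I)
    if (!Mask.extractBits(4, I * 4).isZero()) // any bit set in this quad?
      Result.setBit(I);
  return Result;
}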