Skip to content

Commit 122eec1

Browse files
committed
AMDGPU: Legalize atomicrmw fadd for v2f16/v2bf16 for local memory
Make these operations legal for gfx940 and gfx12.
1 parent 8cc6a24 commit 122eec1

File tree

7 files changed

+375
-512
lines changed

7 files changed

+375
-512
lines changed

llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -301,6 +301,9 @@ static const LLT V10S16 = LLT::fixed_vector(10, 16);
301301
static const LLT V12S16 = LLT::fixed_vector(12, 16);
302302
static const LLT V16S16 = LLT::fixed_vector(16, 16);
303303

304+
static const LLT V2F16 = LLT::fixed_vector(2, LLT::float16());
305+
static const LLT V2BF16 = V2F16; // FIXME
306+
304307
static const LLT V2S32 = LLT::fixed_vector(2, 32);
305308
static const LLT V3S32 = LLT::fixed_vector(3, 32);
306309
static const LLT V4S32 = LLT::fixed_vector(4, 32);
@@ -1638,7 +1641,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
16381641
if (ST.hasLdsAtomicAddF64())
16391642
Atomic.legalFor({{S64, LocalPtr}});
16401643
if (ST.hasAtomicDsPkAdd16Insts())
1641-
Atomic.legalFor({{V2S16, LocalPtr}});
1644+
Atomic.legalFor({{V2F16, LocalPtr}, {V2BF16, LocalPtr}});
16421645
}
16431646
if (ST.hasAtomicFaddInsts())
16441647
Atomic.legalFor({{S32, GlobalPtr}});

llvm/lib/Target/AMDGPU/DSInstructions.td

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1082,6 +1082,12 @@ defm : DSAtomicRetNoRetPat_mc<DS_MAX_RTN_U32, DS_MAX_U32, i32, "atomic_load_umax
10821082
defm : DSAtomicRetNoRetPat_mc<DS_MIN_RTN_F32, DS_MIN_F32, f32, "atomic_load_fmin">;
10831083
defm : DSAtomicRetNoRetPat_mc<DS_MAX_RTN_F32, DS_MAX_F32, f32, "atomic_load_fmax">;
10841084

1085+
1086+
let SubtargetPredicate = HasAtomicDsPkAdd16Insts in {
1087+
defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_F16, DS_PK_ADD_F16, v2f16, "atomic_load_fadd">;
1088+
defm : DSAtomicRetNoRetPat_mc<DS_PK_ADD_RTN_BF16, DS_PK_ADD_BF16, v2bf16, "atomic_load_fadd">;
1089+
}
1090+
10851091
let SubtargetPredicate = isGFX6GFX7GFX8GFX9GFX10 in {
10861092
defm : DSAtomicCmpXChgSwapped_mc<DS_CMPST_RTN_B32, DS_CMPST_B32, i32, "atomic_cmp_swap">;
10871093
}

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15932,6 +15932,16 @@ static OptimizationRemark emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW) {
1593215932
<< " operation at memory scope " << MemScope;
1593315933
}
1593415934

15935+
static bool isHalf2OrBFloat2(Type *Ty) {
15936+
if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
15937+
Type *EltTy = VT->getElementType();
15938+
return VT->getNumElements() == 2 &&
15939+
(EltTy->isHalfTy() || EltTy->isBFloatTy());
15940+
}
15941+
15942+
return false;
15943+
}
15944+
1593515945
TargetLowering::AtomicExpansionKind
1593615946
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1593715947
unsigned AS = RMW->getPointerAddressSpace();
@@ -15990,7 +16000,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1599016000
: AtomicExpansionKind::CmpXChg;
1599116001
}
1599216002

15993-
// TODO: Handle v2f16/v2bf16 cases for gfx940
16003+
if (Subtarget->hasAtomicDsPkAdd16Insts() && isHalf2OrBFloat2(Ty))
16004+
return AtomicExpansionKind::None;
16005+
1599416006
return AtomicExpansionKind::CmpXChg;
1599516007
}
1599616008

llvm/test/CodeGen/AMDGPU/GlobalISel/fp-atomics-gfx940.ll

Lines changed: 2 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -213,22 +213,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16_offset(ptr addrspace(3) %ptr, <2
213213
; GFX940-LABEL: local_atomic_fadd_ret_v2f16_offset:
214214
; GFX940: ; %bb.0:
215215
; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
216-
; GFX940-NEXT: ds_read_b32 v2, v0 offset:65532
217-
; GFX940-NEXT: s_mov_b64 s[0:1], 0
218-
; GFX940-NEXT: .LBB15_1: ; %atomicrmw.start
219-
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
216+
; GFX940-NEXT: ds_pk_add_rtn_f16 v0, v0, v1 offset:65532
220217
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
221-
; GFX940-NEXT: v_mov_b32_e32 v3, v2
222-
; GFX940-NEXT: v_pk_add_f16 v2, v3, v1
223-
; GFX940-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
224-
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
225-
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
226-
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
227-
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
228-
; GFX940-NEXT: s_cbranch_execnz .LBB15_1
229-
; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
230-
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
231-
; GFX940-NEXT: v_mov_b32_e32 v0, v2
232218
; GFX940-NEXT: s_setpc_b64 s[30:31]
233219
%gep = getelementptr <2 x half>, ptr addrspace(3) %ptr, i32 16383
234220
%result = atomicrmw fadd ptr addrspace(3) %gep, <2 x half> %val seq_cst
@@ -239,21 +225,8 @@ define void @local_atomic_fadd_noret_v2f16_offset(ptr addrspace(3) %ptr, <2 x ha
239225
; GFX940-LABEL: local_atomic_fadd_noret_v2f16_offset:
240226
; GFX940: ; %bb.0:
241227
; GFX940-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
242-
; GFX940-NEXT: ds_read_b32 v2, v0 offset:65532
243-
; GFX940-NEXT: s_mov_b64 s[0:1], 0
244-
; GFX940-NEXT: .LBB16_1: ; %atomicrmw.start
245-
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
228+
; GFX940-NEXT: ds_pk_add_f16 v0, v1 offset:65532
246229
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
247-
; GFX940-NEXT: v_pk_add_f16 v3, v2, v1
248-
; GFX940-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
249-
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
250-
; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
251-
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
252-
; GFX940-NEXT: v_mov_b32_e32 v2, v3
253-
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
254-
; GFX940-NEXT: s_cbranch_execnz .LBB16_1
255-
; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
256-
; GFX940-NEXT: s_or_b64 exec, exec, s[0:1]
257230
; GFX940-NEXT: s_setpc_b64 s[30:31]
258231
%gep = getelementptr <2 x half>, ptr addrspace(3) %ptr, i32 16383
259232
%unused = atomicrmw fadd ptr addrspace(3) %gep, <2 x half> %val seq_cst

llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll

Lines changed: 2 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -52,36 +52,13 @@ define float @test_atomicrmw_fsub(ptr addrspace(3) %addr) {
5252
define <2 x half> @test_atomicrmw_fadd_vector(ptr addrspace(3) %addr) {
5353
; CHECK-LABEL: name: test_atomicrmw_fadd_vector
5454
; CHECK: bb.1 (%ir-block.0):
55-
; CHECK-NEXT: successors: %bb.2(0x80000000)
5655
; CHECK-NEXT: liveins: $vgpr0
5756
; CHECK-NEXT: {{ $}}
5857
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
5958
; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
6059
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
61-
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
62-
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
63-
; CHECK-NEXT: G_BR %bb.2
64-
; CHECK-NEXT: {{ $}}
65-
; CHECK-NEXT: bb.2.atomicrmw.start:
66-
; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
67-
; CHECK-NEXT: {{ $}}
68-
; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
69-
; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
70-
; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s16>) = G_FADD [[PHI1]], [[BUILD_VECTOR]]
71-
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FADD]](<2 x s16>)
72-
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
73-
; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
74-
; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
75-
; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
76-
; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
77-
; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
78-
; CHECK-NEXT: G_BR %bb.2
79-
; CHECK-NEXT: {{ $}}
80-
; CHECK-NEXT: bb.3.atomicrmw.end:
81-
; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
82-
; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
83-
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
84-
; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](<2 x s16>)
60+
; CHECK-NEXT: [[ATOMICRMW_FADD:%[0-9]+]]:_(<2 x s16>) = G_ATOMICRMW_FADD [[COPY]](p3), [[BUILD_VECTOR]] :: (load store seq_cst (<2 x s16>) on %ir.addr, addrspace 3)
61+
; CHECK-NEXT: $vgpr0 = COPY [[ATOMICRMW_FADD]](<2 x s16>)
8562
; CHECK-NEXT: SI_RETURN implicit $vgpr0
8663
%oldval = atomicrmw fadd ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
8764
ret <2 x half> %oldval

0 commit comments

Comments (0)