Skip to content

Commit 917be5c

Browse files
committed
AMDGPU: Handle new atomicrmw metadata for fadd case
This is the most complex atomicrmw support case. Note we don't have accurate remarks for all of the cases, which I'm planning on fixing in a later change with more precise wording. Continue respecting amdgpu-unsafe-fp-atomics until its eventual removal. Also seems to fix a few cases not interpreting amdgpu-unsafe-fp-atomics appropriately aggressively.
1 parent 9df089b commit 917be5c

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

41 files changed

+5007
-8028
lines changed

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

Lines changed: 74 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -16067,26 +16067,21 @@ bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
1606716067
SNaN, Depth);
1606816068
}
1606916069

16070-
#if 0
16071-
// FIXME: This should be checked before unsafe fp atomics are enabled
16072-
// Global FP atomic instructions have a hardcoded FP mode and do not support
16073-
// FP32 denormals, and only support v2f16 denormals.
16074-
static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) {
16070+
// On older subtargets, global FP atomic instructions have a hardcoded FP mode
16071+
// and do not support FP32 denormals, and only support v2f16/f64 denormals.
16072+
static bool atomicIgnoresDenormalModeOrFPModeIsFTZ(const AtomicRMWInst *RMW) {
16073+
if (RMW->hasMetadata("amdgpu.ignore.denormal.mode"))
16074+
return true;
16075+
1607516076
const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics();
16076-
auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt);
16077-
if (&Flt == &APFloat::IEEEsingle())
16078-
return DenormMode == DenormalMode::getPreserveSign();
16079-
return DenormMode == DenormalMode::getIEEE();
16080-
}
16081-
#endif
16077+
auto DenormMode = RMW->getFunction()->getDenormalMode(Flt);
16078+
if (DenormMode == DenormalMode::getPreserveSign())
16079+
return true;
1608216080

16083-
// The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
16084-
// floating point atomic instructions. May generate more efficient code,
16085-
// but may not respect rounding and denormal modes, and may give incorrect
16086-
// results for certain memory destinations.
16087-
bool unsafeFPAtomicsDisabled(Function *F) {
16088-
return F->getFnAttribute("amdgpu-unsafe-fp-atomics").getValueAsString() !=
16089-
"true";
16081+
// TODO: Remove this.
16082+
return RMW->getFunction()
16083+
->getFnAttribute("amdgpu-unsafe-fp-atomics")
16084+
.getValueAsBool();
1609016085
}
1609116086

1609216087
static OptimizationRemark emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW) {
@@ -16215,75 +16210,76 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1621516210
return AtomicExpansionKind::CmpXChg;
1621616211
}
1621716212

16218-
if (!AMDGPU::isFlatGlobalAddrSpace(AS) &&
16219-
AS != AMDGPUAS::BUFFER_FAT_POINTER)
16220-
return AtomicExpansionKind::CmpXChg;
16221-
16222-
if (Subtarget->hasGFX940Insts() && (Ty->isFloatTy() || Ty->isDoubleTy()))
16223-
return AtomicExpansionKind::None;
16224-
16225-
if (AS == AMDGPUAS::FLAT_ADDRESS) {
16226-
// gfx940, gfx12
16227-
// FIXME: Needs to account for no fine-grained memory
16228-
if (Subtarget->hasAtomicFlatPkAdd16Insts() && isHalf2OrBFloat2(Ty))
16229-
return AtomicExpansionKind::None;
16230-
} else if (AMDGPU::isExtendedGlobalAddrSpace(AS)) {
16231-
// gfx90a, gfx940, gfx12
16232-
// FIXME: Needs to account for no fine-grained memory
16233-
if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
16234-
return AtomicExpansionKind::None;
16235-
16236-
// gfx940, gfx12
16237-
// FIXME: Needs to account for no fine-grained memory
16238-
if (Subtarget->hasAtomicGlobalPkAddBF16Inst() && isBFloat2(Ty))
16239-
return AtomicExpansionKind::None;
16240-
} else if (AS == AMDGPUAS::BUFFER_FAT_POINTER) {
16241-
// gfx90a, gfx940, gfx12
16242-
// FIXME: Needs to account for no fine-grained memory
16243-
if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
16244-
return AtomicExpansionKind::None;
16245-
16246-
// While gfx90a/gfx940 supports v2bf16 for global/flat, it does not for
16247-
// buffer. gfx12 does have the buffer version.
16248-
if (Subtarget->hasAtomicBufferPkAddBF16Inst() && isBFloat2(Ty))
16249-
return AtomicExpansionKind::None;
16250-
}
16251-
16252-
if (unsafeFPAtomicsDisabled(RMW->getFunction()))
16213+
// LDS atomics respect the denormal mode from the mode register.
16214+
//
16215+
// Traditionally f32 global/buffer memory atomics would unconditionally
16216+
// flush denormals, but newer targets do not flush. f64/f16/bf16 cases never
16217+
// flush.
16218+
//
16219+
// On targets with flat atomic fadd, denormals would flush depending on
16220+
// whether the target address resides in LDS or global memory. We consider
16221+
// this flat-maybe-flush as will-flush.
16222+
if (Ty->isFloatTy() &&
16223+
!Subtarget->hasMemoryAtomicFaddF32DenormalSupport() &&
16224+
!atomicIgnoresDenormalModeOrFPModeIsFTZ(RMW))
1625316225
return AtomicExpansionKind::CmpXChg;
1625416226

16255-
// Always expand system scope fp atomics.
16256-
if (HasSystemScope)
16257-
return AtomicExpansionKind::CmpXChg;
16227+
// FIXME: These ReportUnsafeHWInsts are imprecise. Some of these cases are
16228+
// safe. The message phrasing also should be better.
16229+
if (globalMemoryFPAtomicIsLegal(*Subtarget, RMW, HasSystemScope)) {
16230+
if (AS == AMDGPUAS::FLAT_ADDRESS) {
16231+
// gfx940, gfx12
16232+
if (Subtarget->hasAtomicFlatPkAdd16Insts() && isHalf2OrBFloat2(Ty))
16233+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16234+
} else if (AMDGPU::isExtendedGlobalAddrSpace(AS)) {
16235+
// gfx90a, gfx940, gfx12
16236+
if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
16237+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
1625816238

16259-
// global and flat atomic fadd f64: gfx90a, gfx940.
16260-
if (Subtarget->hasFlatBufferGlobalAtomicFaddF64Inst() && Ty->isDoubleTy())
16261-
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16239+
// gfx940, gfx12
16240+
if (Subtarget->hasAtomicGlobalPkAddBF16Inst() && isBFloat2(Ty))
16241+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16242+
} else if (AS == AMDGPUAS::BUFFER_FAT_POINTER) {
16243+
// gfx90a, gfx940, gfx12
16244+
if (Subtarget->hasAtomicBufferGlobalPkAddF16Insts() && isHalf2(Ty))
16245+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
1626216246

16263-
if (AS != AMDGPUAS::FLAT_ADDRESS && Ty->isFloatTy()) {
16264-
// global/buffer atomic fadd f32 no-rtn: gfx908, gfx90a, gfx940, gfx11+.
16265-
if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
16266-
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16267-
// global/buffer atomic fadd f32 rtn: gfx90a, gfx940, gfx11+.
16268-
if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
16269-
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16270-
}
16247+
// While gfx90a/gfx940 supports v2bf16 for global/flat, it does not for
16248+
// buffer. gfx12 does have the buffer version.
16249+
if (Subtarget->hasAtomicBufferPkAddBF16Inst() && isBFloat2(Ty))
16250+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16251+
}
1627116252

16272-
// flat atomic fadd f32: gfx940, gfx11+.
16273-
if (AS == AMDGPUAS::FLAT_ADDRESS && Ty->isFloatTy()) {
16274-
if (Subtarget->hasFlatAtomicFaddF32Inst())
16253+
// global and flat atomic fadd f64: gfx90a, gfx940.
16254+
if (Subtarget->hasFlatBufferGlobalAtomicFaddF64Inst() && Ty->isDoubleTy())
1627516255
return ReportUnsafeHWInst(AtomicExpansionKind::None);
1627616256

16277-
// If it is in flat address space, and the type is float, we will try to
16278-
// expand it, if the target supports global and lds atomic fadd. The
16279-
// reason we need that is, in the expansion, we emit the check of address
16280-
// space. If it is in global address space, we emit the global atomic
16281-
// fadd; if it is in shared address space, we emit the LDS atomic fadd.
16282-
if (Subtarget->hasLDSFPAtomicAddF32()) {
16257+
if (AS != AMDGPUAS::FLAT_ADDRESS && Ty->isFloatTy()) {
16258+
// global/buffer atomic fadd f32 no-rtn: gfx908, gfx90a, gfx940, gfx11+.
1628316259
if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
16284-
return AtomicExpansionKind::Expand;
16260+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16261+
// global/buffer atomic fadd f32 rtn: gfx90a, gfx940, gfx11+.
1628516262
if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
16286-
return AtomicExpansionKind::Expand;
16263+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16264+
}
16265+
16266+
// flat atomic fadd f32: gfx940, gfx11+.
16267+
if (AS == AMDGPUAS::FLAT_ADDRESS && Ty->isFloatTy()) {
16268+
if (Subtarget->hasFlatAtomicFaddF32Inst())
16269+
return ReportUnsafeHWInst(AtomicExpansionKind::None);
16270+
16271+
// If it is in flat address space, and the type is float, we will try to
16272+
// expand it, if the target supports global and lds atomic fadd. The
16273+
// reason we need that is, in the expansion, we emit the check of
16274+
// address space. If it is in global address space, we emit the global
16275+
// atomic fadd; if it is in shared address space, we emit the LDS atomic
16276+
// fadd.
16277+
if (Subtarget->hasLDSFPAtomicAddF32()) {
16278+
if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
16279+
return AtomicExpansionKind::Expand;
16280+
if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
16281+
return AtomicExpansionKind::Expand;
16282+
}
1628716283
}
1628816284
}
1628916285

llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ define amdgpu_ps float @flat_atomic_fadd_f32_rtn_intrinsic(ptr %ptr, float %data
5757
ret float %ret
5858
}
5959

60-
define amdgpu_ps void @flat_atomic_fadd_f32_no_rtn_atomicrmw(ptr %ptr, float %data) #0 {
60+
define amdgpu_ps void @flat_atomic_fadd_f32_no_rtn_atomicrmw(ptr %ptr, float %data) {
6161
; GFX940-LABEL: name: flat_atomic_fadd_f32_no_rtn_atomicrmw
6262
; GFX940: bb.1 (%ir-block.0):
6363
; GFX940-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
@@ -79,11 +79,11 @@ define amdgpu_ps void @flat_atomic_fadd_f32_no_rtn_atomicrmw(ptr %ptr, float %da
7979
; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
8080
; GFX11-NEXT: FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr)
8181
; GFX11-NEXT: S_ENDPGM 0
82-
%ret = atomicrmw fadd ptr %ptr, float %data syncscope("wavefront") monotonic
82+
%ret = atomicrmw fadd ptr %ptr, float %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
8383
ret void
8484
}
8585

86-
define amdgpu_ps float @flat_atomic_fadd_f32_rtn_atomicrmw(ptr %ptr, float %data) #0 {
86+
define amdgpu_ps float @flat_atomic_fadd_f32_rtn_atomicrmw(ptr %ptr, float %data) {
8787
; GFX940-LABEL: name: flat_atomic_fadd_f32_rtn_atomicrmw
8888
; GFX940: bb.1 (%ir-block.0):
8989
; GFX940-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
@@ -107,10 +107,10 @@ define amdgpu_ps float @flat_atomic_fadd_f32_rtn_atomicrmw(ptr %ptr, float %data
107107
; GFX11-NEXT: [[FLAT_ATOMIC_ADD_F32_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_ADD_F32_RTN [[REG_SEQUENCE]], [[COPY2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr)
108108
; GFX11-NEXT: $vgpr0 = COPY [[FLAT_ATOMIC_ADD_F32_RTN]]
109109
; GFX11-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
110-
%ret = atomicrmw fadd ptr %ptr, float %data syncscope("wavefront") monotonic
110+
%ret = atomicrmw fadd ptr %ptr, float %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
111111
ret float %ret
112112
}
113113

114114
declare float @llvm.amdgcn.flat.atomic.fadd.f32.p1.f32(ptr, float)
115115

116-
attributes #0 = {"amdgpu-unsafe-fp-atomics"="true" }
116+
!0 = !{}

llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ define amdgpu_ps double @flat_atomic_fadd_f64_rtn_intrinsic(ptr %ptr, double %da
4242
ret double %ret
4343
}
4444

45-
define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %data) #0 {
45+
define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %data) {
4646
; GFX90A_GFX940-LABEL: name: flat_atomic_fadd_f64_no_rtn_atomicrmw
4747
; GFX90A_GFX940: bb.1 (%ir-block.0):
4848
; GFX90A_GFX940-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
@@ -55,11 +55,11 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %d
5555
; GFX90A_GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
5656
; GFX90A_GFX940-NEXT: FLAT_ATOMIC_ADD_F64 [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr)
5757
; GFX90A_GFX940-NEXT: S_ENDPGM 0
58-
%ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic
58+
%ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
5959
ret void
6060
}
6161

62-
define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %data) #0 {
62+
define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %data) {
6363
; GFX90A_GFX940-LABEL: name: flat_atomic_fadd_f64_rtn_atomicrmw
6464
; GFX90A_GFX940: bb.1 (%ir-block.0):
6565
; GFX90A_GFX940-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
@@ -78,10 +78,10 @@ define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %da
7878
; GFX90A_GFX940-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
7979
; GFX90A_GFX940-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
8080
; GFX90A_GFX940-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
81-
%ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic
81+
%ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
8282
ret double %ret
8383
}
8484

8585
declare double @llvm.amdgcn.flat.atomic.fadd.f64.p1.f64(ptr, double)
8686

87-
attributes #0 = {"amdgpu-unsafe-fp-atomics"="true" }
87+
!0 = !{}

llvm/test/CodeGen/AMDGPU/GlobalISel/fp-atomics-gfx940.ll

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f32_noret_pat(ptr %ptr) {
3434
; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3535
; GFX940-NEXT: buffer_inv sc0 sc1
3636
; GFX940-NEXT: s_endpgm
37-
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst
37+
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst, !amdgpu.no.remote.memory !0
3838
ret void
3939
}
4040

@@ -50,7 +50,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f32_noret_pat_ieee(ptr %ptr) #0 {
5050
; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5151
; GFX940-NEXT: buffer_inv sc0 sc1
5252
; GFX940-NEXT: s_endpgm
53-
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst
53+
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst, !amdgpu.no.remote.memory !0
5454
ret void
5555
}
5656

@@ -75,7 +75,7 @@ define float @flat_atomic_fadd_f32_rtn_pat(ptr %ptr, float %data) {
7575
; GFX940-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
7676
; GFX940-NEXT: buffer_inv sc0 sc1
7777
; GFX940-NEXT: s_setpc_b64 s[30:31]
78-
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst
78+
%ret = atomicrmw fadd ptr %ptr, float 4.0 seq_cst, !amdgpu.no.remote.memory !0
7979
ret float %ret
8080
}
8181

@@ -235,3 +235,5 @@ define void @flat_atomic_fadd_noret_v2f16_agent_offset(ptr %ptr, <2 x half> %val
235235
}
236236

237237
attributes #0 = { "denormal-fp-math-f32"="ieee,ieee" }
238+
239+
!0 = !{}

0 commit comments

Comments
 (0)