Skip to content

[AMDGPU] Codegen support for constrained multi-dword sloads #96163

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jul 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPU.td
Original file line number Diff line number Diff line change
Expand Up @@ -2026,6 +2026,8 @@ def HasAddNoCarryInsts : Predicate<"Subtarget->hasAddNoCarry()">,

def NotHasAddNoCarryInsts : Predicate<"!Subtarget->hasAddNoCarry()">;

def HasXNACKEnabled : Predicate<"Subtarget->isXNACKEnabled()">;

def Has16BitInsts : Predicate<"Subtarget->has16BitInsts()">,
AssemblerPredicate<(all_of Feature16BitInsts)>;

Expand Down
70 changes: 50 additions & 20 deletions llvm/lib/Target/AMDGPU/SMInstructions.td
Original file line number Diff line number Diff line change
Expand Up @@ -866,45 +866,74 @@ def SMRDBufferImm : ComplexPattern<iPTR, 1, "SelectSMRDBufferImm">;
def SMRDBufferImm32 : ComplexPattern<iPTR, 1, "SelectSMRDBufferImm32">;
def SMRDBufferSgprImm : ComplexPattern<iPTR, 2, "SelectSMRDBufferSgprImm">;

multiclass SMRD_Pattern <string Instr, ValueType vt, bit immci = true> {
class SMRDAlignedLoadPat<PatFrag Op> : PatFrag <(ops node:$ptr), (Op node:$ptr), [{
// Returns true if it is a single dword load or a naturally aligned multi-dword load.
LoadSDNode *Ld = cast<LoadSDNode>(N);
unsigned Size = Ld->getMemoryVT().getStoreSize();
return Size <= 4 || Ld->getAlign().value() >= Size;
}]> {
let GISelPredicateCode = [{
auto &Ld = cast<GLoad>(MI);
TypeSize Size = Ld.getMMO().getSize().getValue();
return Size <= 4 || Ld.getMMO().getAlign().value() >= Size;
}];
}

def aligned_smrd_load : SMRDAlignedLoadPat<smrd_load>;

multiclass SMRD_Patterns <string Instr, ValueType vt, PatFrag frag,
bit immci = true, string suffix = ""> {
// 1. IMM offset
def : GCNPat <
(smrd_load (SMRDImm i64:$sbase, i32:$offset)),
(vt (!cast<SM_Pseudo>(Instr#"_IMM") $sbase, $offset, 0))
>;
(frag (SMRDImm i64:$sbase, i32:$offset)),
(vt (!cast<SM_Pseudo>(Instr#"_IMM"#suffix) $sbase, $offset, 0))>;

// 2. 32-bit IMM offset on CI
if immci then def : GCNPat <
(smrd_load (SMRDImm32 i64:$sbase, i32:$offset)),
(vt (!cast<InstSI>(Instr#"_IMM_ci") $sbase, $offset, 0))> {
let OtherPredicates = [isGFX7Only];
(frag (SMRDImm32 i64:$sbase, i32:$offset)),
(vt (!cast<InstSI>(Instr#"_IMM_ci"#suffix) $sbase, $offset, 0))> {
let SubtargetPredicate = isGFX7Only;
}

// 3. SGPR offset
def : GCNPat <
(smrd_load (SMRDSgpr i64:$sbase, i32:$soffset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR") $sbase, $soffset, 0))> {
let OtherPredicates = [isNotGFX9Plus];
(frag (SMRDSgpr i64:$sbase, i32:$soffset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR"#suffix) $sbase, $soffset, 0))> {
let SubtargetPredicate = isNotGFX9Plus;
}
def : GCNPat <
(smrd_load (SMRDSgpr i64:$sbase, i32:$soffset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, 0, 0))> {
let OtherPredicates = [isGFX9Plus];
(frag (SMRDSgpr i64:$sbase, i32:$soffset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, 0, 0))> {
let SubtargetPredicate = isGFX9Plus;
}

// 4. SGPR+IMM offset
def : GCNPat <
(smrd_load (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM") $sbase, $soffset, $offset, 0))> {
let OtherPredicates = [isGFX9Plus];
(frag (SMRDSgprImm i64:$sbase, i32:$soffset, i32:$offset)),
(vt (!cast<SM_Pseudo>(Instr#"_SGPR_IMM"#suffix) $sbase, $soffset, $offset, 0))> {
let SubtargetPredicate = isGFX9Plus;
}

// 5. No offset
def : GCNPat <
(vt (smrd_load (i64 SReg_64:$sbase))),
(vt (!cast<SM_Pseudo>(Instr#"_IMM") i64:$sbase, 0, 0))
>;
(vt (frag (i64 SReg_64:$sbase))),
(vt (!cast<SM_Pseudo>(Instr#"_IMM"#suffix) i64:$sbase, 0, 0))>;
}

multiclass SMRD_Pattern <string Instr, ValueType vt, bit immci = true> {
// High priority when XNACK is enabled and the load was naturally aligned.
let OtherPredicates = [HasXNACKEnabled], AddedComplexity = 102 in
defm: SMRD_Patterns <Instr, vt, aligned_smrd_load, immci>;

// XNACK is enabled and the load wasn't naturally aligned; select the constrained sload variant.
if !gt(vt.Size, 32) then {
let OtherPredicates = [HasXNACKEnabled], AddedComplexity = 101 in
defm: SMRD_Patterns <Instr, vt, smrd_load, /*immci=*/false, /*suffix=*/"_ec">;
}

// XNACK is disabled.
let AddedComplexity = 100 in
defm: SMRD_Patterns <Instr, vt, smrd_load, immci>;
}

multiclass SMLoad_Pattern <string Instr, ValueType vt, bit immci = true> {
Expand Down Expand Up @@ -1018,6 +1047,8 @@ defm : ScalarBufferLoadIntrinsicPat <SIsbuffer_load_ubyte, "S_BUFFER_LOAD_U8">;
defm : ScalarBufferLoadIntrinsicPat <SIsbuffer_load_short, "S_BUFFER_LOAD_I16">;
defm : ScalarBufferLoadIntrinsicPat <SIsbuffer_load_ushort, "S_BUFFER_LOAD_U16">;

} // End let AddedComplexity = 100

foreach vt = Reg32Types.types in {
defm : SMRD_Pattern <"S_LOAD_DWORD", vt>;
}
Expand All @@ -1042,7 +1073,6 @@ foreach vt = SReg_512.RegTypes in {
defm : SMRD_Pattern <"S_LOAD_DWORDX16", vt>;
}

} // End let AddedComplexity = 100

defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORD", i32>;
defm : SMLoad_Pattern <"S_BUFFER_LOAD_DWORDX2", v2i32>;
Expand Down
100 changes: 50 additions & 50 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1021,20 +1021,20 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret(ptr addrspace(1) %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.global.atomic.fadd.f64.p1.f64(ptr addrspace(1) %ptr, double %data)
Expand All @@ -1044,20 +1044,20 @@ main_body:
define amdgpu_kernel void @global_atomic_fmin_f64_noret(ptr addrspace(1) %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmin_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_min_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_min_f64 v2, v[0:1], s[4:5]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fmin_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_min_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
; GFX940-NEXT: global_atomic_min_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.global.atomic.fmin.f64.p1.f64(ptr addrspace(1) %ptr, double %data)
Expand All @@ -1067,20 +1067,20 @@ main_body:
define amdgpu_kernel void @global_atomic_fmax_f64_noret(ptr addrspace(1) %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmax_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_max_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: global_atomic_max_f64 v2, v[0:1], s[4:5]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fmax_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_max_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
; GFX940-NEXT: global_atomic_max_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.global.atomic.fmax.f64.p1.f64(ptr addrspace(1) %ptr, double %data)
Expand Down Expand Up @@ -1134,14 +1134,14 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_cbranch_execz .LBB39_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3] sc1
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: .LBB39_2:
Expand All @@ -1162,13 +1162,13 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX90A-NEXT: s_cbranch_execz .LBB40_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3]
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: .LBB40_2:
Expand All @@ -1184,14 +1184,14 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_cbranch_execz .LBB40_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: .LBB40_2:
Expand Down Expand Up @@ -1248,14 +1248,14 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_cbranch_execz .LBB41_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3] sc1
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: .LBB41_2:
Expand All @@ -1276,13 +1276,13 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX90A-NEXT: s_cbranch_execz .LBB42_2
; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3]
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: .LBB42_2:
Expand All @@ -1298,14 +1298,14 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_cbranch_execz .LBB42_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: .LBB42_2:
Expand Down Expand Up @@ -1522,14 +1522,14 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrs
; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX940-NEXT: s_cbranch_execz .LBB49_2
; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[2:3]
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[4:5]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: .LBB49_2:
Expand Down Expand Up @@ -1761,19 +1761,19 @@ main_body:
define amdgpu_kernel void @flat_atomic_fadd_f64_noret(ptr %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: flat_atomic_fadd_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
; GFX940-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; GFX940-NEXT: s_endpgm
main_body:
Expand Down Expand Up @@ -1842,19 +1842,19 @@ main_body:
define amdgpu_kernel void @flat_atomic_fmin_f64_noret(ptr %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmin_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: flat_atomic_min_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: flat_atomic_fmin_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
; GFX940-NEXT: flat_atomic_min_f64 v[0:1], v[2:3]
; GFX940-NEXT: s_endpgm
main_body:
Expand Down Expand Up @@ -1884,19 +1884,19 @@ main_body:
define amdgpu_kernel void @flat_atomic_fmax_f64_noret(ptr %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmax_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[6:7], s[6:7] op_sel:[0,1]
; GFX90A-NEXT: flat_atomic_max_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: flat_atomic_fmax_f64_noret:
; GFX940: ; %bb.0: ; %main_body
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX940-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
; GFX940-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
; GFX940-NEXT: flat_atomic_max_f64 v[0:1], v[2:3]
; GFX940-NEXT: s_endpgm
main_body:
Expand Down
Loading
Loading