[CodeGenPrepare] Reverse the canonicalization of isInf/isNanOrInf #81572

Merged · 7 commits · Mar 18, 2024
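InstCombine canonicalizes isinf/isnan-or-inf style checks into an fcmp of fabs(x) against +infinity. This patch teaches CodeGenPrepare to reverse that canonicalization into an llvm.is.fpclass call for targets where the class test lowers better. A minimal IR sketch of the reversal; the i32 mask values assume the in-tree FPClassTest encoding, where fcInf is 0x204 and fcInf|fcNan is 0x207:

; canonical form produced by InstCombine for isinf(x)
%fabs  = call double @llvm.fabs.f64(double %x)
%isinf = fcmp oeq double %fabs, 0x7FF0000000000000
; what CodeGenPrepare now emits instead (516 == fcInf)
%isinf.class = call i1 @llvm.is.fpclass.f64(double %x, i32 516)

; the isnan-or-inf variant uses the unordered predicate and folds the same way
%nanorinf = fcmp ueq double %fabs, 0x7FF0000000000000
%nanorinf.class = call i1 @llvm.is.fpclass.f64(double %x, i32 519) ; fcInf|fcNan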
36 changes: 36 additions & 0 deletions llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1943,6 +1943,39 @@ static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
   return false;
 }
 
+static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
+                                  const DataLayout &DL) {
+  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
+  if (!FCmp)
+    return false;
+
+  // Don't fold if the target offers free fabs and the predicate is legal.
+  EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
+  if (TLI.isFAbsFree(VT) &&
+      TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
+                          VT.getSimpleVT()))
+    return false;
+
+  // Reverse the canonicalization if it is a FP class test
+  auto ShouldReverseTransform = [](FPClassTest ClassTest) {
+    return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
+  };
+  auto [ClassVal, ClassTest] =
+      fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
+                      FCmp->getOperand(0), FCmp->getOperand(1));
+  if (!ClassVal)
+    return false;
+
+  if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
+    return false;
+
+  IRBuilder<> Builder(Cmp);
+  Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
+  Cmp->replaceAllUsesWith(IsFPClass);
+  RecursivelyDeleteTriviallyDeadInstructions(Cmp);
+  return true;
+}
+
 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
   if (sinkCmpExpression(Cmp, *TLI))
     return true;
@@ -1959,6 +1992,9 @@ bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
   if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
     return true;
 
+  if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
+    return true;
+
   return false;
 }
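Note that the fold also fires when the complement of the matched class is fcInf or fcInf|fcNan, which is how isfinite-style tests get rewritten: for example `fcmp one (fabs x), +inf` yields ClassTest == fcFinite, whose complement under the FPClassTest bitmask is fcInf|fcNan. A hedged IR sketch, assuming the usual encoding where fcFinite is 0x1f8:

; canonical isfinite(x): ordered-not-equal against +inf of fabs(x)
%fabs     = call half @llvm.fabs.f16(half %x)
%isfinite = fcmp one half %fabs, 0xH7C00
; reversed form: ~ClassTest matched, so the original mask fcFinite is kept
%isfinite.class = call i1 @llvm.is.fpclass.f16(half %x, i32 504) ; fcFinite == 0x1f8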
22 changes: 7 additions & 15 deletions llvm/test/CodeGen/AArch64/isinf.ll
@@ -58,22 +58,14 @@ define i32 @replace_isinf_call_f64(double %x) {
 define i32 @replace_isinf_call_f128(fp128 %x) {
 ; CHECK-LABEL: replace_isinf_call_f128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    str q0, [sp]
-; CHECK-NEXT:    ldrb w8, [sp, #15]
-; CHECK-NEXT:    and w8, w8, #0x7f
-; CHECK-NEXT:    strb w8, [sp, #15]
-; CHECK-NEXT:    adrp x8, .LCPI3_0
-; CHECK-NEXT:    ldr q0, [sp]
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    bl __eqtf2
-; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp x9, x8, [sp], #16
+; CHECK-NEXT:    and x8, x8, #0x7fffffffffffffff
+; CHECK-NEXT:    eor x8, x8, #0x7fff000000000000
+; CHECK-NEXT:    orr x8, x9, x8
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    cset w0, eq
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %abs = tail call fp128 @llvm.fabs.f128(fp128 %x)
   %cmpinf = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
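The payoff on AArch64: the fp128 isinf check previously round-tripped through the `__eqtf2` soft-float libcall, while after the fold the backend expands the class test as a plain integer compare on the value's bits, as the new CHECK lines above show. A sketch of the IR handed to the backend (the i32 516 mask again assumes fcInf == 0x204):

; before CodeGenPrepare (as written in the test)
%abs    = tail call fp128 @llvm.fabs.f128(fp128 %x)
%cmpinf = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
; after the fold: no libcall needed, SelectionDAG expands this to the
; and/eor/orr/cmp sequence in the CHECK lines above
%cmpinf.class = call i1 @llvm.is.fpclass.f128(fp128 %x, i32 516)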
60 changes: 33 additions & 27 deletions llvm/test/CodeGen/AMDGPU/fp-classify.ll
@@ -618,16 +618,16 @@ define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(ptr addrsp
 define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
 ; SI-LABEL: test_isinf_pattern_f16:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s0, s[0:1], 0xb
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    s_mov_b32 s1, 0x7f800000
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e64 v0, |s0|
-; SI-NEXT:    v_cmp_eq_f32_e32 vcc, s1, v0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    s_cmpk_eq_i32 s4, 0x7c00
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_isinf_pattern_f16:
@@ -667,16 +667,19 @@ define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %ou
 define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
 ; SI-LABEL: test_isfinite_pattern_0_f16:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s0, s[0:1], 0xb
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    s_movk_i32 s1, 0x1f8
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT:    v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT:    s_cmpk_lg_i32 s4, 0x7c00
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_isfinite_pattern_0_f16:
@@ -718,16 +721,19 @@ define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocaptur
 define amdgpu_kernel void @test_isfinite_pattern_4_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
 ; SI-LABEL: test_isfinite_pattern_4_f16:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dword s0, s[0:1], 0xb
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    s_movk_i32 s1, 0x1f8
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; SI-NEXT:    v_cmp_class_f32_e64 s[0:1], v0, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s4
+; SI-NEXT:    s_and_b32 s4, s4, 0x7fff
+; SI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT:    s_cmpk_lt_i32 s4, 0x7c00
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    s_and_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_isfinite_pattern_4_f16:
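On SI, which has no native f16 class instruction, the generic expansion of the inf test becomes a pair of scalar integer ops on the raw half bits, visible in the new CHECK lines (s_and_b32 with 0x7fff, then s_cmpk_eq_i32 against 0x7c00). The equivalent IR-level bit test, as a hedged sketch:

%bits  = bitcast half %x to i16
%abs   = and i16 %bits, 32767    ; 0x7fff: clear the sign bit
%isinf = icmp eq i16 %abs, 31744 ; 0x7c00: the +/-inf bit pattern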