[SelectionDAG] Make (a & x) | (~a & y) -> (a & (x ^ y)) ^ y available for all targets #137641

Open · wants to merge 3 commits into main
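Why the rewrite is sound: a minimal standalone check (my own sketch, not part of the patch) that exhaustively verifies the identity from the title over all 8-bit values. Where a mask bit is 1 the folded form yields (x ^ y) ^ y == x; where it is 0 it yields 0 ^ y == y.

// Exhaustive check of (m & x) | (~m & y) == ((x ^ y) & m) ^ y
// for every 8-bit triple.
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned m = 0; m < 256; ++m)
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y) {
        uint8_t merged = (m & x) | (~m & y);
        uint8_t folded = ((x ^ y) & m) ^ y;
        assert(merged == folded);
      }
  return 0;
}
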
57 changes: 57 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -8110,6 +8110,59 @@ static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1,
  return SDValue();
}

static SDValue foldMaskedMergeImpl(SDValue AndL0, SDValue AndR0, SDValue AndL1,
                                   SDValue AndR1, const SDLoc &DL,
                                   SelectionDAG &DAG) {
  if (!isBitwiseNot(AndL0, true) || !AndL0->hasOneUse())
    return SDValue();
  SDValue NotOp = AndL0->getOperand(0);
  if (NotOp == AndR1)
    std::swap(AndR1, AndL1);
  if (NotOp != AndL1)
    return SDValue();

  EVT VT = AndL1->getValueType(0);
  SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, AndR1, AndR0);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
  SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, AndR0);
  return Xor1;
}

/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
/// equivalent `((x ^ y) & m) ^ y` pattern.
/// This is typically a better representation for targets without a fused
/// "and-not" operation.
static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG,
                               const TargetLowering &TLI, const SDLoc &DL) {
  // Note that masked-merge variants using XOR or ADD expressions are
  // normalized to OR by InstCombine, so we only check for OR.
  assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
  SDValue N0 = Node->getOperand(0);
  if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
    return SDValue();
  SDValue N1 = Node->getOperand(1);
  if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
    return SDValue();

  // If the target supports and-not, don't fold this.
  if (TLI.hasAndNot(SDValue(Node, 0)))
    return SDValue();

  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  SDValue N10 = N1->getOperand(0);
  SDValue N11 = N1->getOperand(1);
  if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
    return Result;
  return SDValue();
}

SDValue DAGCombiner::visitOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
@@ -8288,6 +8341,10 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
  if (SDValue R = foldLogicTreeOfShifts(N, N0, N1, DAG))
    return R;

  if (VT.isScalarInteger() && VT != MVT::i1)
    if (SDValue R = foldMaskedMerge(N, DAG, TLI, DL))
      return R;

  return SDValue();
}

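A note on the gating above: besides hasAndNot(), the fold requires both ANDs to have a single use. A small illustration (my example, assuming nothing beyond the checks in foldMaskedMerge) of why multi-use operands make the rewrite unprofitable:

// Sketch: if (m & x) has a second user, it stays live regardless, so
// rewriting the merge into xor/and/xor would add three nodes without
// removing the two ANDs, a net growth of the DAG.
#include <cstdint>

uint32_t merge_with_reuse(uint32_t m, uint32_t x, uint32_t y) {
  uint32_t mx = m & x;              // second use below keeps this alive
  uint32_t merged = mx | (~m & y);  // hasOneUse() check fails for mx
  return merged + mx;
}
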
14 changes: 14 additions & 0 deletions llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1296,6 +1296,20 @@ bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
  return true;
}

bool SystemZTargetLowering::hasAndNot(SDValue Y) const {
  EVT VT = Y.getValueType();

  // We can use NC(G)RK for types in GPRs ...
  if (VT == MVT::i32 || VT == MVT::i64)
    return Subtarget.hasMiscellaneousExtensions3();

  // ... or VNC for types in VRs.
  if (VT.isVector() || VT == MVT::i128)
    return Subtarget.hasVector();

  return false;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
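For context, a source-level example (mine, not from the patch) of what the hook above affects; the instruction selection described in the comment is my expectation, not verified output:

// On z15 (miscellaneous-extensions-3), hasAndNot() now returns true
// for i64, so this merge should keep its or/and/and-not shape and be
// able to use NCGRK; on older subtargets the generic combine rewrites
// it into the xor/and/xor form instead.
#include <cstdint>

uint64_t masked_merge(uint64_t m, uint64_t x, uint64_t y) {
  return (m & x) | (~m & y);
}
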
1 change: 1 addition & 0 deletions llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -671,6 +671,7 @@ class SystemZTargetLowering : public TargetLowering {
  }

  unsigned getStackProbeSize(const MachineFunction &MF) const;
  bool hasAndNot(SDValue Y) const override;

private:
  const SystemZSubtarget &Subtarget;
58 changes: 0 additions & 58 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52091,59 +52091,6 @@ static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
  return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
}

static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
                                   SDValue And1_L, SDValue And1_R,
                                   const SDLoc &DL, SelectionDAG &DAG) {
  if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
    return SDValue();
  SDValue NotOp = And0_L->getOperand(0);
  if (NotOp == And1_R)
    std::swap(And1_R, And1_L);
  if (NotOp != And1_L)
    return SDValue();

  // (~(NotOp) & And0_R) | (NotOp & And1_R)
  // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
  EVT VT = And1_L->getValueType(0);
  SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
  SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
  SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
  return Xor1;
}

/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
/// equivalent `((x ^ y) & m) ^ y` pattern.
/// This is typically a better representation for targets without a fused
/// "and-not" operation. This function is intended to be called from a
/// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
  // Note that masked-merge variants using XOR or ADD expressions are
  // normalized to OR by InstCombine, so we only check for OR.
  assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
  SDValue N0 = Node->getOperand(0);
  if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
    return SDValue();
  SDValue N1 = Node->getOperand(1);
  if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
    return SDValue();

  SDLoc DL(Node);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  SDValue N10 = N1->getOperand(0);
  SDValue N11 = N1->getOperand(1);
  if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
    return Result;
  return SDValue();
}

/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
@@ -52547,11 +52494,6 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
    }
  }

  // We should fold "masked merge" patterns when `andn` is not available.
  if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
    if (SDValue R = foldMaskedMerge(N, DAG))
      return R;

  if (SDValue R = combineOrXorWithSETCC(N->getOpcode(), dl, VT, N0, N1, DAG))
    return R;

30 changes: 15 additions & 15 deletions llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -16,9 +16,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_andn2_b32 s2, s2, s0
; GFX7-NEXT: s_xor_b32 s1, s1, s2
; GFX7-NEXT: s_and_b32 s0, s1, s0
; GFX7-NEXT: s_or_b32 s0, s2, s0
; GFX7-NEXT: s_xor_b32 s0, s0, s2
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
@@ -28,9 +28,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_andn2_b32 s2, s2, s0
; GFX8-NEXT: s_xor_b32 s1, s1, s2
Contributor: all the amdgpu changes look neutral

Member (Author): I believe that's because those subtargets don't correctly report that they have AndNot instructions.

Contributor: This is in #112647

; GFX8-NEXT: s_and_b32 s0, s1, s0
; GFX8-NEXT: s_or_b32 s0, s2, s0
; GFX8-NEXT: s_xor_b32 s0, s0, s2
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s0
@@ -44,9 +44,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_andn2_b32 s2, s2, s0
; GFX10-NEXT: s_xor_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s1, s0
; GFX10-NEXT: s_or_b32 s0, s2, s0
; GFX10-NEXT: s_xor_b32 s0, s0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s0
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
@@ -1407,9 +1407,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
; GFX7-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
; GFX7-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX7-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GFX7-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
; GFX7-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
; GFX7-NEXT: s_add_u32 s0, s0, 10
; GFX7-NEXT: s_addc_u32 s1, s1, 0
; GFX7-NEXT: v_mov_b32_e32 v0, s0
@@ -1422,9 +1422,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
; GFX8-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX8-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GFX8-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
; GFX8-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
; GFX8-NEXT: s_add_u32 s0, s0, 10
; GFX8-NEXT: s_addc_u32 s1, s1, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
@@ -1438,9 +1438,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
; GFX10-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
; GFX10-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX10-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GFX10-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
; GFX10-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
; GFX10-NEXT: s_add_u32 s0, s0, 10
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
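Regarding the review thread above: a hypothetical sketch (mine, not from this PR) of the kind of AMDGPU override that would preserve the s_andn2 forms; the actual change is said to be in #112647 and may differ.

// Hypothetical, for illustration only: s_andn2_b32/b64 exist only on
// the scalar unit, so a real override would have to reject divergent
// values, roughly like this.
bool SITargetLowering::hasAndNot(SDValue Y) const {
  // AND-NOT is only available as a scalar (SALU) instruction.
  if (Y->isDivergent())
    return false;
  EVT VT = Y.getValueType();
  return VT == MVT::i32 || VT == MVT::i64;
}
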
42 changes: 21 additions & 21 deletions llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -289,16 +289,16 @@ entry:
define amdgpu_kernel void @half4_inselt(ptr addrspace(1) %out, <4 x half> %vec, i32 %sel) {
; GCN-LABEL: half4_inselt:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_mov_b32 s4, 0x3c003c00
; GCN-NEXT: s_mov_b32 s5, s4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5]
; GCN-NEXT: s_lshl_b32 s6, s6, 4
; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6
; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1
@@ -317,10 +317,10 @@ define amdgpu_kernel void @half2_inselt(ptr addrspace(1) %out, <2 x half> %vec,
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s3, s3, 4
; GCN-NEXT: s_xor_b32 s4, s2, 0x3c003c00
; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3
; GCN-NEXT: s_andn2_b32 s2, s2, s3
; GCN-NEXT: s_and_b32 s3, s3, 0x3c003c00
; GCN-NEXT: s_or_b32 s2, s3, s2
; GCN-NEXT: s_and_b32 s3, s4, s3
; GCN-NEXT: s_xor_b32 s2, s3, s2
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: v_mov_b32_e32 v2, s2
@@ -399,10 +399,10 @@ define amdgpu_kernel void @short2_inselt(ptr addrspace(1) %out, <2 x i16> %vec,
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s3, s3, 4
; GCN-NEXT: s_xor_b32 s4, s2, 0x10001
; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3
; GCN-NEXT: s_andn2_b32 s2, s2, s3
; GCN-NEXT: s_and_b32 s3, s3, 0x10001
; GCN-NEXT: s_or_b32 s2, s3, s2
; GCN-NEXT: s_and_b32 s3, s4, s3
; GCN-NEXT: s_xor_b32 s2, s3, s2
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: v_mov_b32_e32 v2, s2
@@ -417,16 +417,16 @@ entry:
define amdgpu_kernel void @short4_inselt(ptr addrspace(1) %out, <4 x i16> %vec, i32 %sel) {
; GCN-LABEL: short4_inselt:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_mov_b32 s4, 0x10001
; GCN-NEXT: s_mov_b32 s5, s4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5]
; GCN-NEXT: s_lshl_b32 s6, s6, 4
; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6
; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1
@@ -442,15 +442,15 @@ entry:
define amdgpu_kernel void @byte8_inselt(ptr addrspace(1) %out, <8 x i8> %vec, i32 %sel) {
; GCN-LABEL: byte8_inselt:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s4, s6, 3
; GCN-NEXT: s_lshl_b64 s[4:5], 0xff, s4
; GCN-NEXT: s_and_b32 s7, s5, 0x1010101
; GCN-NEXT: s_and_b32 s6, s4, 0x1010101
; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
; GCN-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3]
; GCN-NEXT: s_xor_b32 s5, s3, 0x1010101
; GCN-NEXT: s_lshl_b32 s6, s6, 3
; GCN-NEXT: s_xor_b32 s4, s2, 0x1010101
; GCN-NEXT: s_lshl_b64 s[6:7], 0xff, s6
; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1