
Commit bba83e2

[AArch64] LowerMUL - use SDValue directly instead of SDNode. NFC.
As discussed on D159537, using the SDValue operands directly instead of peeking inside at the underlying SDNode prevents any issues where a non-zero result index has been used.
Parent commit: 23ea98f
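To make the rationale concrete, here is a minimal, self-contained sketch of the hazard (not LLVM code: MiniNode and MiniValue below are hypothetical stand-ins for SDNode and SDValue). An SDValue pairs a node pointer with the result number it refers to, so N.getValueType() and N.getOperand(i) apply that index automatically, whereas code that grabs the SDNode and hard-codes index 0, as in N->getValueType(0), silently reads the wrong result whenever the value is not result 0. Since the commit is tagged NFC, the rewrite does not change behaviour today; it just removes the opportunity for that class of bug.

#include <cstdio>
#include <vector>

// MiniNode / MiniValue are hypothetical stand-ins for LLVM's SDNode / SDValue,
// reduced to the one detail that matters here: a value knows which result of
// its node it refers to, while a raw node pointer does not.
struct MiniNode {
  std::vector<const char *> ResultTypes; // one entry per result value
  // Analogous to SDNode::getValueType(unsigned): the caller supplies the index.
  const char *getValueType(unsigned ResNo) const { return ResultTypes[ResNo]; }
};

struct MiniValue {
  MiniNode *Node;
  unsigned ResNo; // which result of Node this value refers to
  // Analogous to SDValue::getValueType(): the stored index is applied for us.
  const char *getValueType() const { return Node->getValueType(ResNo); }
};

int main() {
  // A node producing two results, e.g. a vector value plus a chain.
  MiniNode N{{"v4i32", "Other"}};
  MiniValue Use{&N, 1}; // an operand that uses result #1 of N

  // Peeking at the node and hard-coding index 0 reads the wrong result type
  // whenever the operand actually refers to a non-zero result index...
  std::printf("node, index 0 hard-coded: %s\n", N.getValueType(0));
  // ...while querying the value itself honours the result index it carries.
  std::printf("value, ResNo = 1:         %s\n", Use.getValueType());
  return 0;
}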

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 51 additions & 51 deletions
@@ -4399,11 +4399,11 @@ getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
   return C->getZExtValue();
 }

-static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
+static bool isExtendedBUILD_VECTOR(SDValue N, SelectionDAG &DAG,
                                    bool isSigned) {
-  EVT VT = N->getValueType(0);
+  EVT VT = N.getValueType();

-  if (N->getOpcode() != ISD::BUILD_VECTOR)
+  if (N.getOpcode() != ISD::BUILD_VECTOR)
     return false;

   for (const SDValue &Elt : N->op_values()) {
@@ -4425,22 +4425,22 @@ static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
   return true;
 }

-static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
-  if (ISD::isExtOpcode(N->getOpcode()))
-    return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
-                                             N->getOperand(0)->getValueType(0),
-                                             N->getValueType(0),
-                                             N->getOpcode());
+static SDValue skipExtensionForVectorMULL(SDValue N, SelectionDAG &DAG) {
+  if (ISD::isExtOpcode(N.getOpcode()))
+    return addRequiredExtensionForVectorMULL(N.getOperand(0), DAG,
+                                             N.getOperand(0).getValueType(),
+                                             N.getValueType(),
+                                             N.getOpcode());

-  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
-  EVT VT = N->getValueType(0);
+  assert(N.getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
+  EVT VT = N.getValueType();
   SDLoc dl(N);
   unsigned EltSize = VT.getScalarSizeInBits() / 2;
   unsigned NumElts = VT.getVectorNumElements();
   MVT TruncVT = MVT::getIntegerVT(EltSize);
   SmallVector<SDValue, 8> Ops;
   for (unsigned i = 0; i != NumElts; ++i) {
-    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
+    ConstantSDNode *C = cast<ConstantSDNode>(N.getOperand(i));
     const APInt &CInt = C->getAPIntValue();
     // Element types smaller than 32 bits are not legal, so use i32 elements.
     // The values are implicitly truncated so sext vs. zext doesn't matter.
@@ -4449,34 +4449,34 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
 }

-static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
-  return N->getOpcode() == ISD::SIGN_EXTEND ||
-         N->getOpcode() == ISD::ANY_EXTEND ||
+static bool isSignExtended(SDValue N, SelectionDAG &DAG) {
+  return N.getOpcode() == ISD::SIGN_EXTEND ||
+         N.getOpcode() == ISD::ANY_EXTEND ||
          isExtendedBUILD_VECTOR(N, DAG, true);
 }

-static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
-  return N->getOpcode() == ISD::ZERO_EXTEND ||
-         N->getOpcode() == ISD::ANY_EXTEND ||
+static bool isZeroExtended(SDValue N, SelectionDAG &DAG) {
+  return N.getOpcode() == ISD::ZERO_EXTEND ||
+         N.getOpcode() == ISD::ANY_EXTEND ||
          isExtendedBUILD_VECTOR(N, DAG, false);
 }

-static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
-  unsigned Opcode = N->getOpcode();
+static bool isAddSubSExt(SDValue N, SelectionDAG &DAG) {
+  unsigned Opcode = N.getOpcode();
   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
-    SDNode *N0 = N->getOperand(0).getNode();
-    SDNode *N1 = N->getOperand(1).getNode();
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
     return N0->hasOneUse() && N1->hasOneUse() &&
            isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
   }
   return false;
 }

-static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
-  unsigned Opcode = N->getOpcode();
+static bool isAddSubZExt(SDValue N, SelectionDAG &DAG) {
+  unsigned Opcode = N.getOpcode();
   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
-    SDNode *N0 = N->getOperand(0).getNode();
-    SDNode *N1 = N->getOperand(1).getNode();
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
     return N0->hasOneUse() && N1->hasOneUse() &&
            isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
   }
@@ -4550,7 +4550,7 @@ SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
 }

-static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
+static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
                                  SDLoc DL, bool &IsMLA) {
   bool IsN0SExt = isSignExtended(N0, DAG);
   bool IsN1SExt = isSignExtended(N1, DAG);
@@ -4569,12 +4569,12 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
       !isExtendedBUILD_VECTOR(N1, DAG, false)) {
     SDValue ZextOperand;
     if (IsN0ZExt)
-      ZextOperand = N0->getOperand(0);
+      ZextOperand = N0.getOperand(0);
     else
-      ZextOperand = N1->getOperand(0);
+      ZextOperand = N1.getOperand(0);
     if (DAG.SignBitIsZero(ZextOperand)) {
-      SDNode *NewSext =
-          DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
+      SDValue NewSext =
+          DAG.getSExtOrTrunc(ZextOperand, DL, N0.getValueType());
       if (IsN0ZExt)
         N0 = NewSext;
       else
@@ -4585,10 +4585,10 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,

   // Select UMULL if we can replace the other operand with an extend.
   if (IsN0ZExt || IsN1ZExt) {
-    EVT VT = N0->getValueType(0);
+    EVT VT = N0.getValueType();
     APInt Mask = APInt::getHighBitsSet(VT.getScalarSizeInBits(),
                                        VT.getScalarSizeInBits() / 2);
-    if (DAG.MaskedValueIsZero(SDValue(IsN0ZExt ? N1 : N0, 0), Mask)) {
+    if (DAG.MaskedValueIsZero(IsN0ZExt ? N1 : N0, Mask)) {
       EVT HalfVT;
       switch (VT.getSimpleVT().SimpleTy) {
       case MVT::v2i64:
@@ -4604,13 +4604,13 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
         return 0;
       }
       // Truncate and then extend the result.
-      SDValue NewExt = DAG.getNode(ISD::TRUNCATE, DL, HalfVT,
-                                   SDValue(IsN0ZExt ? N1 : N0, 0));
+      SDValue NewExt =
+          DAG.getNode(ISD::TRUNCATE, DL, HalfVT, IsN0ZExt ? N1 : N0);
       NewExt = DAG.getZExtOrTrunc(NewExt, DL, VT);
       if (IsN0ZExt)
-        N1 = NewExt.getNode();
+        N1 = NewExt;
       else
-        N0 = NewExt.getNode();
+        N0 = NewExt;
       return AArch64ISD::UMULL;
     }
   }
@@ -4647,18 +4647,18 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
   // that VMULL can be detected. Otherwise v2i64 multiplications are not legal.
   assert((VT.is128BitVector() || VT.is64BitVector()) && VT.isInteger() &&
          "unexpected type for custom-lowering ISD::MUL");
-  SDNode *N0 = Op.getOperand(0).getNode();
-  SDNode *N1 = Op.getOperand(1).getNode();
+  SDValue N0 = Op.getOperand(0);
+  SDValue N1 = Op.getOperand(1);
   bool isMLA = false;
   EVT OVT = VT;
   if (VT.is64BitVector()) {
-    if (N0->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        isNullConstant(N0->getOperand(1)) &&
-        N1->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        isNullConstant(N1->getOperand(1))) {
-      N0 = N0->getOperand(0).getNode();
-      N1 = N1->getOperand(0).getNode();
-      VT = N0->getValueType(0);
+    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+        isNullConstant(N0.getOperand(1)) &&
+        N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+        isNullConstant(N1.getOperand(1))) {
+      N0 = N0.getOperand(0);
+      N1 = N1.getOperand(0);
+      VT = N0.getValueType();
     } else {
       if (VT == MVT::v1i64) {
         if (Subtarget->hasSVE())
@@ -4702,12 +4702,12 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
   // Optimizing (zext A + zext B) * C, to (S/UMULL A, C) + (S/UMULL B, C) during
   // isel lowering to take advantage of no-stall back to back s/umul + s/umla.
   // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57
-  SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
-  SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
+  SDValue N00 = skipExtensionForVectorMULL(N0.getOperand(0), DAG);
+  SDValue N01 = skipExtensionForVectorMULL(N0.getOperand(1), DAG);
   EVT Op1VT = Op1.getValueType();
   return DAG.getNode(
       ISD::EXTRACT_SUBVECTOR, DL, OVT,
-      DAG.getNode(N0->getOpcode(), DL, VT,
+      DAG.getNode(N0.getOpcode(), DL, VT,
                   DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                   DAG.getNode(NewOpc, DL, VT,
@@ -16476,8 +16476,8 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
   if (TrailingZeroes) {
     // Conservatively do not lower to shift+add+shift if the mul might be
     // folded into smul or umul.
-    if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
-                            isZeroExtended(N0.getNode(), DAG)))
+    if (N0->hasOneUse() && (isSignExtended(N0, DAG) ||
+                            isZeroExtended(N0, DAG)))
       return SDValue();
     // Conservatively do not lower to shift+add+shift if the mul might be
     // folded into madd or msub.
