@@ -4399,11 +4399,11 @@ getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {
   return C->getZExtValue();
 }
 
-static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
+static bool isExtendedBUILD_VECTOR(SDValue N, SelectionDAG &DAG,
                                    bool isSigned) {
-  EVT VT = N->getValueType(0);
+  EVT VT = N.getValueType();
 
-  if (N->getOpcode() != ISD::BUILD_VECTOR)
+  if (N.getOpcode() != ISD::BUILD_VECTOR)
     return false;
 
   for (const SDValue &Elt : N->op_values()) {
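
An aside on why the explicit result index disappears from these calls: an SDValue is conceptually a (SDNode*, result-number) pair, so SDValue::getValueType() already knows which of the node's results it names, whereas SDNode::getValueType(0) has to be told. A minimal toy model of that relationship (illustrative stand-ins only, not LLVM's real types):

    #include <cassert>
    #include <vector>

    // Toy stand-ins for SDNode/SDValue, for illustration only.
    struct ToyNode {
      unsigned Opcode;
      std::vector<int> ResultTypes; // one entry per result the node produces
      // SDNode-style accessor: the caller must pass the result index.
      int getValueType(unsigned ResNo) const { return ResultTypes[ResNo]; }
    };

    // SDValue-style handle: the result index travels with the pointer.
    struct ToyValue {
      ToyNode *Node;
      unsigned ResNo;
      unsigned getOpcode() const { return Node->Opcode; }
      int getValueType() const { return Node->getValueType(ResNo); }
    };

    int main() {
      ToyNode N{/*Opcode=*/1, /*ResultTypes=*/{7, 9}};
      ToyValue V{&N, 0};
      // Same information, but the handle spells it without an explicit index.
      assert(V.getValueType() == N.getValueType(0));
    }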
@@ -4425,22 +4425,22 @@ static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
   return true;
 }
 
-static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
-  if (ISD::isExtOpcode(N->getOpcode()))
-    return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
-                                             N->getOperand(0)->getValueType(0),
-                                             N->getValueType(0),
-                                             N->getOpcode());
+static SDValue skipExtensionForVectorMULL(SDValue N, SelectionDAG &DAG) {
+  if (ISD::isExtOpcode(N.getOpcode()))
+    return addRequiredExtensionForVectorMULL(N.getOperand(0), DAG,
+                                             N.getOperand(0).getValueType(),
+                                             N.getValueType(),
+                                             N.getOpcode());
 
-  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
-  EVT VT = N->getValueType(0);
+  assert(N.getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
+  EVT VT = N.getValueType();
   SDLoc dl(N);
   unsigned EltSize = VT.getScalarSizeInBits() / 2;
   unsigned NumElts = VT.getVectorNumElements();
   MVT TruncVT = MVT::getIntegerVT(EltSize);
   SmallVector<SDValue, 8> Ops;
   for (unsigned i = 0; i != NumElts; ++i) {
-    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
+    ConstantSDNode *C = cast<ConstantSDNode>(N.getOperand(i));
     const APInt &CInt = C->getAPIntValue();
     // Element types smaller than 32 bits are not legal, so use i32 elements.
     // The values are implicitly truncated so sext vs. zext doesn't matter.
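
The comment at the end of this hunk is worth unpacking: once a constant is truncated to the narrow element width, the surviving bits are identical whether the wider value is viewed as sign- or zero-extended. A quick standalone check of that claim:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t Signed = -2;             // bit pattern 0xFFFFFFFE
      uint32_t Unsigned = 0xFFFFFFFEu; // same bit pattern, zext view
      // Truncating either view to 16 bits keeps the same low bits (0xFFFE),
      // so sext vs. zext of the original is irrelevant after truncation.
      assert(static_cast<uint16_t>(Signed) == static_cast<uint16_t>(Unsigned));
    }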
@@ -4449,34 +4449,34 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
 }
 
-static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
-  return N->getOpcode() == ISD::SIGN_EXTEND ||
-         N->getOpcode() == ISD::ANY_EXTEND ||
+static bool isSignExtended(SDValue N, SelectionDAG &DAG) {
+  return N.getOpcode() == ISD::SIGN_EXTEND ||
+         N.getOpcode() == ISD::ANY_EXTEND ||
          isExtendedBUILD_VECTOR(N, DAG, true);
 }
 
-static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
-  return N->getOpcode() == ISD::ZERO_EXTEND ||
-         N->getOpcode() == ISD::ANY_EXTEND ||
+static bool isZeroExtended(SDValue N, SelectionDAG &DAG) {
+  return N.getOpcode() == ISD::ZERO_EXTEND ||
+         N.getOpcode() == ISD::ANY_EXTEND ||
          isExtendedBUILD_VECTOR(N, DAG, false);
 }
 
-static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
-  unsigned Opcode = N->getOpcode();
+static bool isAddSubSExt(SDValue N, SelectionDAG &DAG) {
+  unsigned Opcode = N.getOpcode();
   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
-    SDNode *N0 = N->getOperand(0).getNode();
-    SDNode *N1 = N->getOperand(1).getNode();
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
     return N0->hasOneUse() && N1->hasOneUse() &&
            isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
   }
   return false;
 }
 
-static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
-  unsigned Opcode = N->getOpcode();
+static bool isAddSubZExt(SDValue N, SelectionDAG &DAG) {
+  unsigned Opcode = N.getOpcode();
   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
-    SDNode *N0 = N->getOperand(0).getNode();
-    SDNode *N1 = N->getOperand(1).getNode();
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
     return N0->hasOneUse() && N1->hasOneUse() &&
            isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
   }
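
One detail that makes this mechanical rewrite work: lines like N0->hasOneUse() survive unchanged because SDValue overloads operator-> to forward to the underlying SDNode, so node-level queries keep their arrow spelling even though N0 is now a value handle. Extending the earlier toy model (again purely illustrative, not LLVM's real code):

    // Forwarding operator-> is what lets node-level queries like
    // hasOneUse() keep their arrow syntax on a value-typed handle.
    struct UseNode {
      unsigned NumUses = 1;
      bool hasOneUse() const { return NumUses == 1; }
    };

    struct UseValue {
      UseNode *Node;
      UseNode *operator->() const { return Node; } // forward to the node
    };

    int main() {
      UseNode N;
      UseValue V{&N};
      return V->hasOneUse() ? 0 : 1; // arrow syntax works on the handle
    }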
@@ -4550,7 +4550,7 @@ SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
 }
 
-static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
+static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
                                  SDLoc DL, bool &IsMLA) {
   bool IsN0SExt = isSignExtended(N0, DAG);
   bool IsN1SExt = isSignExtended(N1, DAG);
@@ -4569,12 +4569,12 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
       !isExtendedBUILD_VECTOR(N1, DAG, false)) {
     SDValue ZextOperand;
     if (IsN0ZExt)
-      ZextOperand = N0->getOperand(0);
+      ZextOperand = N0.getOperand(0);
     else
-      ZextOperand = N1->getOperand(0);
+      ZextOperand = N1.getOperand(0);
     if (DAG.SignBitIsZero(ZextOperand)) {
-      SDNode *NewSext =
-          DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
+      SDValue NewSext =
+          DAG.getSExtOrTrunc(ZextOperand, DL, N0.getValueType());
       if (IsN0ZExt)
         N0 = NewSext;
       else
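
The SignBitIsZero rewrite in this hunk rests on a small identity: when the sign bit of the pre-extension value is known clear, zero-extension and sign-extension produce the same result, so the ZERO_EXTEND operand can be re-spelled as SIGN_EXTEND to pair with the other, sign-extended operand and select SMULL. A standalone check of the identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      int16_t Narrow = 0x3FFF;                       // sign bit is clear
      int32_t Sext = Narrow;                         // sign-extend to 32 bits
      uint32_t Zext = static_cast<uint16_t>(Narrow); // zero-extend to 32 bits
      assert(static_cast<uint32_t>(Sext) == Zext);   // both give 0x00003FFF
    }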
@@ -4585,10 +4585,10 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
 
   // Select UMULL if we can replace the other operand with an extend.
   if (IsN0ZExt || IsN1ZExt) {
-    EVT VT = N0->getValueType(0);
+    EVT VT = N0.getValueType();
     APInt Mask = APInt::getHighBitsSet(VT.getScalarSizeInBits(),
                                        VT.getScalarSizeInBits() / 2);
-    if (DAG.MaskedValueIsZero(SDValue(IsN0ZExt ? N1 : N0, 0), Mask)) {
+    if (DAG.MaskedValueIsZero(IsN0ZExt ? N1 : N0, Mask)) {
       EVT HalfVT;
       switch (VT.getSimpleVT().SimpleTy) {
       case MVT::v2i64:
@@ -4604,13 +4604,13 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
         return 0;
       }
       // Truncate and then extend the result.
-      SDValue NewExt = DAG.getNode(ISD::TRUNCATE, DL, HalfVT,
-                                   SDValue(IsN0ZExt ? N1 : N0, 0));
+      SDValue NewExt =
+          DAG.getNode(ISD::TRUNCATE, DL, HalfVT, IsN0ZExt ? N1 : N0);
       NewExt = DAG.getZExtOrTrunc(NewExt, DL, VT);
       if (IsN0ZExt)
-        N1 = NewExt.getNode();
+        N1 = NewExt;
       else
-        N0 = NewExt.getNode();
+        N0 = NewExt;
       return AArch64ISD::UMULL;
     }
   }
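
The MaskedValueIsZero path above works because a lane whose upper half is known zero round-trips losslessly through a truncate to half width followed by a zero-extend; materializing that explicit extend is what gives UMULL selection the operand shape it looks for. Per lane, the invariant is:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Lane = 0x00000000DEADBEEFull;            // high 32 bits known zero
      uint32_t Half = static_cast<uint32_t>(Lane);      // ISD::TRUNCATE analogue
      uint64_t RoundTrip = static_cast<uint64_t>(Half); // zext back to full width
      assert(RoundTrip == Lane);                        // nothing was lost
    }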
@@ -4647,18 +4647,18 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
   // that VMULL can be detected. Otherwise v2i64 multiplications are not legal.
   assert((VT.is128BitVector() || VT.is64BitVector()) && VT.isInteger() &&
          "unexpected type for custom-lowering ISD::MUL");
-  SDNode *N0 = Op.getOperand(0).getNode();
-  SDNode *N1 = Op.getOperand(1).getNode();
+  SDValue N0 = Op.getOperand(0);
+  SDValue N1 = Op.getOperand(1);
   bool isMLA = false;
   EVT OVT = VT;
   if (VT.is64BitVector()) {
-    if (N0->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        isNullConstant(N0->getOperand(1)) &&
-        N1->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        isNullConstant(N1->getOperand(1))) {
-      N0 = N0->getOperand(0).getNode();
-      N1 = N1->getOperand(0).getNode();
-      VT = N0->getValueType(0);
+    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+        isNullConstant(N0.getOperand(1)) &&
+        N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+        isNullConstant(N1.getOperand(1))) {
+      N0 = N0.getOperand(0);
+      N1 = N1.getOperand(0);
+      VT = N0.getValueType();
     } else {
       if (VT == MVT::v1i64) {
         if (Subtarget->hasSVE())
@@ -4702,12 +4702,12 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
   // Optimizing (zext A + zext B) * C, to (S/UMULL A, C) + (S/UMULL B, C) during
   // isel lowering to take advantage of no-stall back to back s/umul + s/umla.
   // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57
-  SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
-  SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
+  SDValue N00 = skipExtensionForVectorMULL(N0.getOperand(0), DAG);
+  SDValue N01 = skipExtensionForVectorMULL(N0.getOperand(1), DAG);
   EVT Op1VT = Op1.getValueType();
   return DAG.getNode(
       ISD::EXTRACT_SUBVECTOR, DL, OVT,
-      DAG.getNode(N0->getOpcode(), DL, VT,
+      DAG.getNode(N0.getOpcode(), DL, VT,
                   DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                   DAG.getNode(NewOpc, DL, VT,
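
The (zext A + zext B) * C rewrite described in the comment leans on distributivity, which holds per lane even under wrapping arithmetic, since unsigned overflow is modular. A one-line sanity check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t A = 0x12345678u, B = 0x9ABCDEF0u, C = 0xFEDCBA98u;
      // Distributivity survives wraparound: both sides reduce mod 2^64.
      assert((A + B) * C == A * C + B * C);
    }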
@@ -16476,8 +16476,8 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
   if (TrailingZeroes) {
     // Conservatively do not lower to shift+add+shift if the mul might be
     // folded into smul or umul.
-    if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
-                            isZeroExtended(N0.getNode(), DAG)))
+    if (N0->hasOneUse() && (isSignExtended(N0, DAG) ||
+                            isZeroExtended(N0, DAG)))
       return SDValue();
     // Conservatively do not lower to shift+add+shift if the mul might be
     // folded into madd or msub.
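
For context on the guard in this last hunk: performMulCombine can decompose a multiply by certain constants with trailing zeroes into the shift+add+shift sequence the comment names, and it declines to do so when the operand is extended, because keeping the ISD::MUL lets it fold into smull/umull instead. One such decomposable shape, with an example constant assumed for illustration rather than taken from the patch, is C = (2^m + 1) << z:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed example: C = (2^3 + 1) << 2 = 36, a constant with
      // trailing zeroes that shift+add+shift can realize.
      uint64_t X = 0x1234567u;
      uint64_t C = ((1ull << 3) + 1) << 2;
      assert(X * C == ((X << 3) + X) << 2); // shift, add, shift
    }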