@@ -157,7 +157,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   auto addRegClassForRVV = [this](MVT VT) {
     // Disable the smallest fractional LMUL types if ELEN is less than
     // RVVBitsPerBlock.
-    unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
+    unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELen();
     if (VT.getVectorMinNumElements() < MinElts)
       return;
 
@@ -1333,7 +1333,7 @@ bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
     return true;
 
   // Don't allow VF=1 if those types aren't legal.
-  if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELEN())
+  if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELen())
     return true;
 
   // VLEN=32 support is incomplete.
@@ -2341,7 +2341,7 @@ static bool useRVVForFixedLengthVectorVT(MVT VT,
   }
 
   // Reject elements larger than ELEN.
-  if (EltVT.getSizeInBits() > Subtarget.getELEN())
+  if (EltVT.getSizeInBits() > Subtarget.getELen())
     return false;
 
   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
@@ -2370,7 +2370,7 @@ static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
          "Expected legal fixed length vector!");
 
   unsigned MinVLen = Subtarget.getRealMinVLen();
-  unsigned MaxELen = Subtarget.getELEN();
+  unsigned MaxELen = Subtarget.getELen();
 
   MVT EltVT = VT.getVectorElementType();
   switch (EltVT.SimpleTy) {
@@ -3222,7 +3222,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     // XLenVT if we're producing a v8i1. This results in more consistent
     // codegen across RV32 and RV64.
     unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
-    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
+    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
     // If we have to use more than one INSERT_VECTOR_ELT then this
     // optimization is likely to increase code size; avoid performing it in
     // such a case. We can use a load from a constant pool in this case.
@@ -3722,7 +3722,7 @@ static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const RISCVSubtarget &Subtarget) {
   // Need to be able to widen the vector.
-  if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+  if (VT.getScalarSizeInBits() >= Subtarget.getELen())
     return false;
 
   // Both inputs must be extracts.
@@ -3766,7 +3766,7 @@ static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
                                 int &OddSrc, const RISCVSubtarget &Subtarget) {
   // We need to be able to widen elements to the next larger integer type.
-  if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+  if (VT.getScalarSizeInBits() >= Subtarget.getELen())
     return false;
 
   int Size = Mask.size();
@@ -4117,7 +4117,7 @@ static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
     OddV = convertToScalableVector(VecContainerVT, OddV, DAG, Subtarget);
   }
 
-  assert(VecVT.getScalarSizeInBits() < Subtarget.getELEN());
+  assert(VecVT.getScalarSizeInBits() < Subtarget.getELen());
 
   // We're working with a vector of the same size as the resulting
   // interleaved vector, but with half the number of elements and
@@ -7385,7 +7385,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
     unsigned WidenVecLen;
     SDValue ExtractElementIdx;
     SDValue ExtractBitIdx;
-    unsigned MaxEEW = Subtarget.getELEN();
+    unsigned MaxEEW = Subtarget.getELen();
     MVT LargestEltVT = MVT::getIntegerVT(
         std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
     if (NumElts <= LargestEltVT.getSizeInBits()) {
@@ -7686,7 +7686,7 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
   // Determine the VF that corresponds to LMUL 1 for ElementWidth.
   unsigned LMul1VF = RISCV::RVVBitsPerBlock / ElementWidth;
   // We don't support VF==1 with ELEN==32.
-  unsigned MinVF = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
+  unsigned MinVF = RISCV::RVVBitsPerBlock / Subtarget.getELen();
 
   unsigned VF = N->getConstantOperandVal(2);
   assert(VF >= MinVF && VF <= (LMul1VF * 8) && isPowerOf2_32(VF) &&
@@ -8769,7 +8769,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
 
   // We can deinterleave through vnsrl.wi if the element type is smaller than
   // ELEN
-  if (VecVT.getScalarSizeInBits() < Subtarget.getELEN()) {
+  if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
     SDValue Even =
         getDeinterleaveViaVNSRL(DL, VecVT, Concat, true, Subtarget, DAG);
     SDValue Odd =
@@ -8838,7 +8838,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
 
   // If the element type is smaller than ELEN, then we can interleave with
   // vwaddu.vv and vwmaccu.vx
-  if (VecVT.getScalarSizeInBits() < Subtarget.getELEN()) {
+  if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
     Interleaved = getWideningInterleave(Op.getOperand(0), Op.getOperand(1), DL,
                                         DAG, Subtarget);
   } else {
@@ -17805,7 +17805,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
     // a large scalar constant and instead use vmv.v.x/i to do the
     // broadcast. For everything else, prefer ELenVT to minimize VL and thus
     // maximize the chance we can encode the size in the vsetvli.
-    MVT ELenVT = MVT::getIntegerVT(Subtarget.getELEN());
+    MVT ELenVT = MVT::getIntegerVT(Subtarget.getELen());
     MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
 
     // Do we have sufficient alignment for our preferred VT? If not, revert
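Every hunk above is the same mechanical rename of RISCVSubtarget::getELEN() to getELen(); no behavior changes. The touched call sites use ELEN in two recurring ways, sketched below as a minimal standalone C++ illustration. This is not LLVM's API: RVVBitsPerBlock matches the RVV spec constant, but minVF and canWiden are hypothetical stand-ins for the patterns at the call sites.

// Minimal standalone sketch of the two ELEN idioms in the hunks above.
// "ELen" stands in for RISCVSubtarget::getELen(): the widest element
// width, in bits, the vector unit supports (32 for Zve32*, 64 otherwise).

constexpr unsigned RVVBitsPerBlock = 64; // bits per LMUL=1 register block

// Idiom 1 (addRegClassForRVV, shouldExpandGetVectorLength,
// lowerGetVectorLength): the smallest supported VF is
// RVVBitsPerBlock / ELEN, so with ELEN=32 the VF=1 (smallest
// fractional-LMUL) types are rejected.
constexpr unsigned minVF(unsigned ELen) { return RVVBitsPerBlock / ELen; }

// Idiom 2 (interleave/deinterleave and widening paths): an element may
// only be widened to the next larger integer type when it is strictly
// narrower than ELEN.
constexpr bool canWiden(unsigned ScalarBits, unsigned ELen) {
  return ScalarBits < ELen;
}

static_assert(minVF(64) == 1 && minVF(32) == 2, "VF=1 needs ELEN=64");
static_assert(canWiden(32, 64) && !canWiden(32, 32), "widening gate");

int main() { return 0; }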