[RISCV] Migrate getConstant indexed insert/extract subvector to new API #139111
Conversation
Note that this change is possibly not NFC. The prior routines used getConstant with XLenVT; the new wrappers use getVectorIdxConstant instead. Digging through the code, the type used for the index is an integer of pointer width taken from the DataLayout. For typical RV32 and RV64 configurations the pointer is the same width as XLEN, but you could have a 32-bit pointer on an RV64 machine.
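For illustration, this is the shape of the migration (a minimal sketch using the names from the touched routines; `DAG`, `DL`, `VT`, `V`, and `Subtarget` are assumed to be in scope as they are in RISCVISelLowering.cpp):

```cpp
// Before: the subvector index was built explicitly with XLenVT.
SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
SDValue Old = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);

// After: the wrapper builds the index with getVectorIdxConstant, whose
// type is an integer as wide as a pointer, taken from the DataLayout.
SDValue New = DAG.getExtractSubvector(DL, VT, V, /*Idx=*/0);
```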
@llvm/pr-subscribers-backend-risc-v
Author: Philip Reames (preames)
Changes: as described above. Full diff: https://github.com/llvm/llvm-project/pull/139111.diff (1 file affected):
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e5104e9919d51..63245e29a67a8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2796,8 +2796,7 @@ static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
assert(V.getValueType().isScalableVector() &&
"Expected a scalable vector operand!");
SDLoc DL(V);
- SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
+ return DAG.getExtractSubvector(DL, VT, V, 0);
}
/// Return the type of the mask type suitable for masking the provided
@@ -3906,8 +3905,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// our final mask.
assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
Vec = DAG.getBitcast(MVT::v8i1, Vec);
- Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
- DAG.getConstant(0, DL, XLenVT));
+ Vec = DAG.getExtractSubvector(DL, VT, Vec, 0);
} else {
// Else we must have produced an integer type with the same size as the
// mask type; bitcast for the final result.
@@ -3970,9 +3968,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
DAG.getSignedConstant(SplatValue, DL, XLenVT),
DAG.getVectorIdxConstant(0, DL));
if (ViaVecLen != 1)
- Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
- MVT::getVectorVT(ViaIntVT, 1), Vec,
- DAG.getConstant(0, DL, XLenVT));
+ Vec = DAG.getExtractSubvector(DL, MVT::getVectorVT(ViaIntVT, 1), Vec, 0);
return DAG.getBitcast(VT, Vec);
}
@@ -4040,9 +4036,8 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
DAG.getSignedConstant(SplatValue, DL, XLenVT), ViaVL);
Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
if (ViaVecLen != RequiredVL)
- Splat = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
- MVT::getVectorVT(ViaIntVT, RequiredVL), Splat,
- DAG.getConstant(0, DL, XLenVT));
+ Splat = DAG.getExtractSubvector(
+ DL, MVT::getVectorVT(ViaIntVT, RequiredVL), Splat, 0);
return DAG.getBitcast(VT, Splat);
}
}
@@ -4876,10 +4871,8 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
DAG.getConstant(NewMask[0], DL, XLenVT), TrueMask, VL);
- return DAG.getNode(
- ISD::EXTRACT_SUBVECTOR, DL, VT,
- convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget),
- DAG.getConstant(0, DL, XLenVT));
+ return DAG.getExtractSubvector(
+ DL, VT, convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget), 0);
}
// Because vslideup leaves the destination elements at the start intact, we can
@@ -11205,8 +11198,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
assert(VLen);
unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
SDValue Insert =
- DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVecVT, Vec, SubVec,
- DAG.getConstant(OrigIdx / Vscale, DL, XLenVT));
+ DAG.getInsertSubvector(DL, Vec, SubVec, OrigIdx / Vscale);
if (VecVT.isFixedLengthVector())
Insert = convertFromScalableVector(VecVT, Insert, DAG, Subtarget);
return Insert;
@@ -11402,8 +11394,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
if (SubVecVT.isFixedLengthVector()) {
assert(VLen);
unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
- Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerSubVecVT, Vec,
- DAG.getConstant(OrigIdx / Vscale, DL, XLenVT));
+ Vec =
+ DAG.getExtractSubvector(DL, ContainerSubVecVT, Vec, OrigIdx / Vscale);
return convertFromScalableVector(SubVecVT, Vec, DAG, Subtarget);
}
return Op;
@@ -11430,8 +11422,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
Idx /= *VLen / RISCV::RVVBitsPerBlock;
}
InterSubVT = getLMUL1VT(VecVT);
- Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
- DAG.getConstant(Idx, DL, XLenVT));
+ Vec = DAG.getExtractSubvector(DL, InterSubVT, Vec, Idx);
}
// Slide this vector register down by the desired number of elements in order
This isn't possible with the existing DataLayouts, right?
LGTM
This question was the entire reason for posting for review. I didn't know for sure. :)
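For reference, a quick sanity check against the layouts the backend currently computes for the plain riscv32/riscv64 triples (the strings below are an assumption and may drift; a customized target could differ):

```cpp
#include "llvm/IR/DataLayout.h"
#include <cassert>

int main() {
  // DataLayout strings as currently computed for plain riscv32/riscv64
  // triples (assumption; verify against RISCVTargetMachine).
  llvm::DataLayout DL32("e-m:e-p:32:32-i64:64-n32-S128");
  llvm::DataLayout DL64("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
  // getVectorIdxConstant ends up using an integer of pointer width, so
  // with these layouts the index type matches XLenVT on both targets.
  assert(DL32.getPointerSizeInBits(0) == 32);
  assert(DL64.getPointerSizeInBits(0) == 64);
  return 0;
}
```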