Revert "[RISCV] RISCV vector calling convention (2/2)" #97994
Revert "[RISCV] RISCV vector calling convention (2/2)" #97994
Conversation
@llvm/pr-subscribers-llvm-ir @llvm/pr-subscribers-clang

Author: Brandon Wu (4vtomat)

Changes

Patch is 94.58 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/97994.diff

1020 Files Affected:
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index a0820e2093bc2..67f480dec0fe3 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -762,8 +762,10 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops.back()->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -772,11 +774,10 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -785,6 +786,7 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 1]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
@@ -828,24 +830,24 @@ multiclass RVVUnitStridedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, mask, vl)
+ // Intrinsic: (tuple, ptr, mask, vl)
// Unmasked
// Builtin: (ptr, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, vl)
+ // Intrinsic: (tuple, ptr, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 1];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 1]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 2]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Ops[0]->getType(), Operands.back()->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -880,8 +882,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -890,11 +894,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -903,6 +906,7 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // vl
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
@@ -911,14 +915,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
clang::CharUnits Align =
CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
- llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I});
- ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I});
- }
+ llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
// Store new_vl
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF});
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
if (ReturnValue.isNull())
@@ -957,8 +957,10 @@ multiclass RVVStridedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -967,11 +969,10 @@ multiclass RVVStridedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -981,6 +982,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1025,25 +1027,25 @@ multiclass RVVStridedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, stride, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
+ // Intrinsic: (tuple, ptr, stride, mask, vl)
// Unmasked
// Builtin: (ptr, stride, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, stride, vl)
+ // Intrinsic: (tuple, ptr, stride, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 2];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 2]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
Operands.push_back(Ops[Offset + 1]); // Stride
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 3]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -1073,8 +1075,6 @@ multiclass RVVIndexedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -1083,11 +1083,10 @@ multiclass RVVIndexedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -1097,9 +1096,15 @@ multiclass RVVIndexedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {ElementVectorType, Ops[Offset + 1]->getType(),
- Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Ops[0]->getType(),
+ Ops.back()->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1139,26 +1144,28 @@ multiclass RVVIndexedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, index, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
+ // Intrinsic: (tuple, ptr, index, mask, vl)
// Unmasked
// Builtin: (ptr, index, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, index, vl)
+ // Intrinsic: (tuple, ptr, index, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 2];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 2]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
Operands.push_back(Ops[Offset + 1]); // Idx
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 3]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
- Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Ops[0]->getType(),
+ Operands.back()->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -2468,22 +2475,25 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- if (isa<StructType>(Ops[0]->getType())) // For tuple type
- // Extract value from index (operand 1) of vtuple (operand 0)
- return Builder.CreateExtractValue(
- Ops[0],
- {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
auto *VecTy = cast<ScalableVectorType>(ResultType);
- auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
// Mask to only valid indices.
- unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
+ if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
+ unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
+ }
+
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
+ return Builder.CreateIntrinsic(Intrinsic::riscv_vector_extract,
+ {ResultType, Ops[0]->getType(), XLenTy},
+ {Ops[0], Ops[1]});
}
}] in {
foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
@@ -2500,22 +2510,25 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- if (isa<StructType>(ResultType)) // For tuple type
- // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
- return Builder.CreateInsertValue(
- Ops[0], Ops[2],
- {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
- auto *ResVecTy = cast<ScalableVectorType>(ResultType);
auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
// Mask to only valid indices.
- unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
+ if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
+ unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
+ }
+
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
+ return Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
+ {ResultType, Ops[2]->getType(), XLenTy},
+ {Ops[0], Ops[2], Ops[1]});
}
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2539,22 +2552,26 @@ let HasMasked = false, HasVL = false, IRName = "" in {
SupportOverloading = false,
ManualCodegen = [{
{
- if (isa<StructType>(ResultType)) {
- unsigned NF = cast<StructType>(ResultType)->getNumElements();
- llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
- for (unsigned I = 0; I < NF; ++I) {
- ReturnTuple = Builder.CreateInsertValue(ReturnTuple, Ops[I], {I});
- }
- return ReturnTuple;
- }
llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
llvm::Value *Idx =
ConstantInt::get(Builder.getInt64Ty(),
- VecTy->getMinNumElements() * I);
- ReturnVector =
- Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ ResultType->isScalableTy() ?
+ VecTy->getMinNumElements() * I : I);
+
+ if (ResultType->isScalableTy())
+ ReturnVector =
+ Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ else
+ ReturnVector =
+ Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
+ {ResultType, Ops[I]->getType(), XLenTy},
+ {ReturnVector, Ops[I], Idx});
+
}
return ReturnVector;
}
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 97493bae5656e..3386578904156 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -429,6 +429,7 @@ class RVVIntrinsic {
bool hasBuiltinAlias() const { return HasBuiltinAlias; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool isMasked() const { return IsMasked; }
+ llvm::StringRef getOverloadedName() const { return OverloadedName; }
llvm::StringRef getIRName() const { return IRName; }
llvm::StringRef getManualCodegen() const { return ManualCodegen; }
PolicyScheme getPolicyScheme() const { return Scheme; }
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 5b92f1837980c..1c7d1f81e9bcc 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -21751,13 +21751,14 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
}
Intrinsic::ID ID = Intrinsic::not_intrinsic;
- unsigned NF = 1;
// The 0th bit simulates the `vta` of RVV
// The 1st bit simulates the `vma` of RVV
constexpr unsigned RVV_VTA = 0x1;
constexpr unsigned RVV_VMA = 0x2;
int PolicyAttrs = 0;
bool IsMasked = false;
+ // This is used by segment load/store to determine its LLVM type.
+ unsigned SegInstSEW = 8;
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index d823c336e39bf..49f...
[truncated]
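Taken together, the clang-side hunks above all make the same change: instead of splitting a segment value into NF separate scalable-vector operands via CreateExtractValue, codegen now passes one tuple-typed value plus an explicit SEW operand. Below is a minimal sketch of the resulting IR call shape for a masked two-field segment store. The tuple type and operand order are read off the Builtin/Intrinsic comments in the diff; the exact intrinsic name mangling and the SEW encoding are assumptions, not copied from the patch:

    ; one tuple operand instead of (val0, val1, ...), with SEW appended last
    call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1.i64(
        target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %v_tuple, ; tuple
        ptr %base,                                                   ; Ptr
        <vscale x 2 x i1> %mask,                                     ; mask
        i64 %vl,                                                     ; VL
        i64 %sew)                                                    ; SegInstSEW

The matching loads return the tuple directly, which is why the fault-first variants now extract the result with CreateExtractValue(LoadValue, 0) and the new VL with index 1 instead of rebuilding a struct field by field.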
✅ With the latest revision this PR passed the C/C++ code formatter.
Force-pushed from 6fda19d to f48403b
LGTM
Force-pushed from f48403b to 6d2380f
This needs to be rebased. I added another usage of RVVDispatcher to RISCVCallLowering.cpp recently.
Force-pushed from 2b37bb8 to b58c7fa
LGTM
…79096) (llvm#87736)" This reverts commit 91dd844.
Force-pushed from b58c7fa to 0f96474
…on (#97995) This patch handles target lowering and the calling convention. For target lowering, the vector tuple type, previously represented as multiple scalable vectors, is now a single `MVT`, and each `MVT` has a corresponding register class. Loads and stores of vector tuples are handled the same way, but need additional vector insert/extract instructions to access sub-register groups. The inline assembly constraint for vector tuple types can be modeled directly as "vr", which is identical to normal vector registers. For the calling convention, register allocation no longer needs an alternative algorithm, which makes the code easier to maintain and read. Stacked on #97994
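As a concrete illustration of the single-type representation described above — the tuple type syntax follows the riscv.vector.tuple work in this series, and the extract call mirrors the Intrinsic::riscv_vector_extract uses added in the clang diff, but treat the exact names and mangling as assumptions rather than the patch's verbatim output:

    ; a vint32m1x2_t value is one SSA value of a single tuple type
    %tup = load target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr %p
    ; reading field 1 becomes a sub-register-group extract rather than an
    ; extractvalue on a struct of two scalable vectors
    %f1 = call <vscale x 2 x i32> @llvm.riscv.vector.extract(
              target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %tup, i64 1)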
This reverts commit 91dd844.
Stacked on #97993