Skip to content

[llvm][RISCV] Add RISCV vector tuple type to value types (MVT) #97724

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
164 changes: 78 additions & 86 deletions clang/include/clang/Basic/riscv_vector.td

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions clang/include/clang/Support/RISCVVIntrinsicUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -429,6 +429,7 @@ class RVVIntrinsic {
// Read-only accessors over intrinsic state set at construction time.
// hasManualCodegen() derives its answer from whether a manual-codegen
// snippet was provided rather than from a separate flag.
bool hasBuiltinAlias() const { return HasBuiltinAlias; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool isMasked() const { return IsMasked; }
// Name used for the overloaded (type-inferred) C intrinsic spelling.
llvm::StringRef getOverloadedName() const { return OverloadedName; }
llvm::StringRef getIRName() const { return IRName; }
llvm::StringRef getManualCodegen() const { return ManualCodegen; }
PolicyScheme getPolicyScheme() const { return Scheme; }
Expand Down
3 changes: 2 additions & 1 deletion clang/lib/CodeGen/CGBuiltin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21751,13 +21751,14 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
}

Intrinsic::ID ID = Intrinsic::not_intrinsic;
unsigned NF = 1;
// The 0th bit simulates the `vta` of RVV
// The 1st bit simulates the `vma` of RVV
constexpr unsigned RVV_VTA = 0x1;
constexpr unsigned RVV_VMA = 0x2;
int PolicyAttrs = 0;
bool IsMasked = false;
// This is used by segment load/store to determine its LLVM type.
unsigned SegInstSEW = 8;

// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
Expand Down
15 changes: 8 additions & 7 deletions clang/lib/CodeGen/CodeGenTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -513,14 +513,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
{
ASTContext::BuiltinVectorTypeInfo Info =
Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
// Tuple types are expressed as aggregate types of the same scalable
// vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale x
// 2 x i32>, <vscale x 2 x i32>}).
if (Info.NumVectors != 1) {
llvm::Type *EltTy = llvm::ScalableVectorType::get(
ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
return llvm::StructType::get(getLLVMContext(), EltTys);
int Log2LMUL =
llvm::Log2_64(
Info.EC.getKnownMinValue() *
ConvertType(Info.ElementType)->getScalarSizeInBits() * 8 /
llvm::RISCV::RVVBitsPerBlock) -
3;
return llvm::RISCVVectorTupleType::get(getLLVMContext(), Log2LMUL,
Info.NumVectors);
}
return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
Info.EC.getKnownMinValue());
Expand Down
2 changes: 1 addition & 1 deletion clang/lib/Support/RISCVVIntrinsicUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1011,7 +1011,7 @@ RVVIntrinsic::RVVIntrinsic(
(!IsMasked && hasPassthruOperand())) {
for (auto &I : IntrinsicTypes) {
if (I >= 0)
I += NF;
I += 1;
}
}
}
Expand Down

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -8,112 +8,112 @@

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } @test_vloxseg2ei16_v_bf16mf4x2(
// CHECK-RV64-LABEL: define dso_local riscv_mf4x2 @test_vloxseg2ei16_v_bf16mf4x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } @llvm.riscv.vloxseg2.nxv1bf16.nxv1i16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_mf4x2 @llvm.riscv.vloxseg2.riscv_mf4x2.nxv1i16.i64(riscv_mf4x2 poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret riscv_mf4x2 [[TMP0]]
//
// Unmasked 2-field indexed segment load wrapper; forwards its arguments
// unchanged to the __riscv_vloxseg2ei16_v_bf16mf4x2 intrinsic and returns
// the resulting vbfloat16mf4x2_t tuple.
vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1,
vuint16mf4_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16mf4x2(rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } @test_vloxseg2ei16_v_bf16mf2x2(
// CHECK-RV64-LABEL: define dso_local riscv_mf2x2 @test_vloxseg2ei16_v_bf16mf2x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } @llvm.riscv.vloxseg2.nxv2bf16.nxv2i16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_mf2x2 @llvm.riscv.vloxseg2.riscv_mf2x2.nxv2i16.i64(riscv_mf2x2 poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret riscv_mf2x2 [[TMP0]]
//
// Unmasked 2-field indexed segment load wrapper; forwards its arguments
// unchanged to the __riscv_vloxseg2ei16_v_bf16mf2x2 intrinsic.
vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1,
vuint16mf2_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16mf2x2(rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } @test_vloxseg2ei16_v_bf16m1x2(
// CHECK-RV64-LABEL: define dso_local riscv_m1x2 @test_vloxseg2ei16_v_bf16m1x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } @llvm.riscv.vloxseg2.nxv4bf16.nxv4i16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m1x2 @llvm.riscv.vloxseg2.riscv_m1x2.nxv4i16.i64(riscv_m1x2 poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret riscv_m1x2 [[TMP0]]
//
// Unmasked 2-field indexed segment load wrapper; forwards its arguments
// unchanged to the __riscv_vloxseg2ei16_v_bf16m1x2 intrinsic.
vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2,
size_t vl) {
return __riscv_vloxseg2ei16_v_bf16m1x2(rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_vloxseg2ei16_v_bf16m2x2(
// CHECK-RV64-LABEL: define dso_local riscv_m2x2 @test_vloxseg2ei16_v_bf16m2x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.riscv.vloxseg2.nxv8bf16.nxv8i16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m2x2 @llvm.riscv.vloxseg2.riscv_m2x2.nxv8i16.i64(riscv_m2x2 poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret riscv_m2x2 [[TMP0]]
//
// Unmasked 2-field indexed segment load wrapper; forwards its arguments
// unchanged to the __riscv_vloxseg2ei16_v_bf16m2x2 intrinsic.
vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2,
size_t vl) {
return __riscv_vloxseg2ei16_v_bf16m2x2(rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } @test_vloxseg2ei16_v_bf16m4x2(
// CHECK-RV64-LABEL: define dso_local riscv_m4x2 @test_vloxseg2ei16_v_bf16m4x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } @llvm.riscv.vloxseg2.nxv16bf16.nxv16i16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m4x2 @llvm.riscv.vloxseg2.riscv_m4x2.nxv16i16.i64(riscv_m4x2 poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret riscv_m4x2 [[TMP0]]
//
// Unmasked 2-field indexed segment load wrapper; forwards its arguments
// unchanged to the __riscv_vloxseg2ei16_v_bf16m4x2 intrinsic.
vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2,
size_t vl) {
return __riscv_vloxseg2ei16_v_bf16m4x2(rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } @test_vloxseg2ei16_v_bf16mf4x2_m(
// CHECK-RV64-LABEL: define dso_local riscv_mf4x2 @test_vloxseg2ei16_v_bf16mf4x2_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } @llvm.riscv.vloxseg2.mask.nxv1bf16.nxv1i16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret { <vscale x 1 x bfloat>, <vscale x 1 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_mf4x2 @llvm.riscv.vloxseg2.mask.riscv_mf4x2.nxv1i16.nxv1i1.i64(riscv_mf4x2 poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
// CHECK-RV64-NEXT: ret riscv_mf4x2 [[TMP0]]
//
// Masked variant: passes the mask `vm` through as the first argument of the
// __riscv_vloxseg2ei16_v_bf16mf4x2_m intrinsic; otherwise a direct forward.
vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm,
const __bf16 *rs1,
vuint16mf4_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16mf4x2_m(vm, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } @test_vloxseg2ei16_v_bf16mf2x2_m(
// CHECK-RV64-LABEL: define dso_local riscv_mf2x2 @test_vloxseg2ei16_v_bf16mf2x2_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } @llvm.riscv.vloxseg2.mask.nxv2bf16.nxv2i16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret { <vscale x 2 x bfloat>, <vscale x 2 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_mf2x2 @llvm.riscv.vloxseg2.mask.riscv_mf2x2.nxv2i16.nxv2i1.i64(riscv_mf2x2 poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
// CHECK-RV64-NEXT: ret riscv_mf2x2 [[TMP0]]
//
// Masked variant: passes the mask `vm` through as the first argument of the
// __riscv_vloxseg2ei16_v_bf16mf2x2_m intrinsic; otherwise a direct forward.
vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm,
const __bf16 *rs1,
vuint16mf2_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16mf2x2_m(vm, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } @test_vloxseg2ei16_v_bf16m1x2_m(
// CHECK-RV64-LABEL: define dso_local riscv_m1x2 @test_vloxseg2ei16_v_bf16m1x2_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } @llvm.riscv.vloxseg2.mask.nxv4bf16.nxv4i16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret { <vscale x 4 x bfloat>, <vscale x 4 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m1x2 @llvm.riscv.vloxseg2.mask.riscv_m1x2.nxv4i16.nxv4i1.i64(riscv_m1x2 poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
// CHECK-RV64-NEXT: ret riscv_m1x2 [[TMP0]]
//
// Masked variant: passes the mask `vm` through as the first argument of the
// __riscv_vloxseg2ei16_v_bf16m1x2_m intrinsic; otherwise a direct forward.
vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
vuint16m1_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16m1x2_m(vm, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_vloxseg2ei16_v_bf16m2x2_m(
// CHECK-RV64-LABEL: define dso_local riscv_m2x2 @test_vloxseg2ei16_v_bf16m2x2_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.riscv.vloxseg2.mask.nxv8bf16.nxv8i16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m2x2 @llvm.riscv.vloxseg2.mask.riscv_m2x2.nxv8i16.nxv8i1.i64(riscv_m2x2 poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
// CHECK-RV64-NEXT: ret riscv_m2x2 [[TMP0]]
//
// Masked variant: passes the mask `vm` through as the first argument of the
// __riscv_vloxseg2ei16_v_bf16m2x2_m intrinsic; otherwise a direct forward.
vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
vuint16m2_t rs2, size_t vl) {
return __riscv_vloxseg2ei16_v_bf16m2x2_m(vm, rs1, rs2, vl);
}

// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } @test_vloxseg2ei16_v_bf16m4x2_m(
// CHECK-RV64-LABEL: define dso_local riscv_m4x2 @test_vloxseg2ei16_v_bf16m4x2_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } @llvm.riscv.vloxseg2.mask.nxv16bf16.nxv16i16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret { <vscale x 16 x bfloat>, <vscale x 16 x bfloat> } [[TMP0]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call riscv_m4x2 @llvm.riscv.vloxseg2.mask.riscv_m4x2.nxv16i16.nxv16i1.i64(riscv_m4x2 poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
// CHECK-RV64-NEXT: ret riscv_m4x2 [[TMP0]]
//
vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
vuint16m4_t rs2, size_t vl) {
Expand Down
Loading
Loading