[GlobalIsel] Combine G_VSCALE #94096


Merged
merged 2 commits from gisel-vscale into llvm:main on Jun 6, 2024

Conversation

@tschuett commented Jun 1, 2024

We need these combines for scalable address calculation and legal scalable addressing modes.
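
For orientation, the immediate algebra behind the four new combines, as a minimal standalone sketch using llvm::APInt (illustrative only; the function names are invented here, and in the combines themselves each fold only fires when the matched instruction carries the nsw flag):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // vscale(A) + vscale(B)  ==>  vscale(A + B)
    APInt foldAddOfVScale(const APInt &A, const APInt &B) { return A + B; }
    // vscale(A) * C          ==>  vscale(A * C)
    APInt foldMulOfVScale(const APInt &A, const APInt &C) { return A * C; }
    // vscale(A) << C         ==>  vscale(A << C)
    APInt foldShlOfVScale(const APInt &A, const APInt &C) { return A.shl(C); }
    // X - vscale(A)          ==>  X + vscale(-A), leaving only a G_ADD
    APInt negateForSubOfVScale(const APInt &A) { return -A; }

For example, G_VSCALE i64 9 and G_VSCALE i64 11 feeding an nsw G_ADD collapse into a single G_VSCALE i64 20.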

@llvmbot (Member) commented Jun 1, 2024

@llvm/pr-subscribers-llvm-globalisel

Author: Thorsten Schütt (tschuett)

Changes

We need these combines for scalable address calculation and legal scalable addressing modes.


Full diff: https://github.com/llvm/llvm-project/pull/94096.diff

5 Files Affected:

  • (modified) llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h (+8)
  • (modified) llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h (+40-1)
  • (modified) llvm/include/llvm/Target/GlobalISel/Combine.td (+36-1)
  • (modified) llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp (+85-1)
  • (added) llvm/test/CodeGen/AArch64/GlobalISel/combine-vscale.mir (+113)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 2ddf20ebe7af7..5e476b9f7bf31 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -868,6 +868,14 @@ class CombinerHelper {
   bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
                                              BuildFnTy &MatchInfo);
 
+  bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+
+  bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+
+  bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+
+  bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+
 private:
   /// Checks for legality of an indexed variant of \p LdSt.
   bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 2b3efc3b609f0..36ae9beed8aa9 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -14,10 +14,12 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
 #define LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
 
-#include "llvm/IR/Instructions.h"
+#include "llvm/ADT/APInt.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/Support/Casting.h"
 
 namespace llvm {
@@ -856,6 +858,43 @@ class GTrunc : public GCastOp {
   };
 };
 
+/// Represents a vscale.
+class GVScale : public GenericMachineInstr {
+public:
+  APInt getSrc() const { return getOperand(1).getCImm()->getValue(); }
+
+  static bool classof(const MachineInstr *MI) {
+    return MI->getOpcode() == TargetOpcode::G_VSCALE;
+  };
+};
+
+/// Represents an integer subtraction.
+class GSub : public GIntBinOp {
+public:
+  static bool classof(const MachineInstr *MI) {
+    return MI->getOpcode() == TargetOpcode::G_SUB;
+  };
+};
+
+/// Represents an integer multiplication.
+class GMul : public GIntBinOp {
+public:
+  static bool classof(const MachineInstr *MI) {
+    return MI->getOpcode() == TargetOpcode::G_MUL;
+  };
+};
+
+/// Represents a shift left.
+class GSHL : public GenericMachineInstr {
+public:
+  Register getSrcReg() const { return getOperand(1).getReg(); }
+  Register getShiftReg() const { return getOperand(2).getReg(); }
+
+  static bool classof(const MachineInstr *MI) {
+    return MI->getOpcode() == TargetOpcode::G_SHL;
+  };
+};
+
 } // namespace llvm
 
 #endif // LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 383589add7755..94abf10a033ce 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1598,6 +1598,37 @@ def insert_vector_elt_oob : GICombineRule<
          [{ return Helper.matchInsertVectorElementOOB(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
 
+def add_of_vscale : GICombineRule<
+   (defs root:$root, build_fn_matchinfo:$matchinfo),
+   (match (G_VSCALE $left, $imm1),
+          (G_VSCALE $right, $imm2),
+          (G_ADD $root, $left, $right, (MIFlags NoSWrap)),
+   [{ return Helper.matchAddOfVScale(${root}, ${matchinfo}); }]),
+   (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def mul_of_vscale : GICombineRule<
+   (defs root:$root, build_fn_matchinfo:$matchinfo),
+   (match (G_VSCALE $left, $scale),
+          (G_CONSTANT $x, $imm1),
+          (G_MUL $root, $left, $x, (MIFlags NoSWrap)),
+   [{ return Helper.matchMulOfVScale(${root}, ${matchinfo}); }]),
+   (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def shl_of_vscale : GICombineRule<
+   (defs root:$root, build_fn_matchinfo:$matchinfo),
+   (match (G_VSCALE $left, $imm),
+          (G_CONSTANT $x, $imm1),
+          (G_SHL $root, $left, $x, (MIFlags NoSWrap)),
+   [{ return Helper.matchShlOfVScale(${root}, ${matchinfo}); }]),
+   (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
+def sub_of_vscale : GICombineRule<
+   (defs root:$root, build_fn_matchinfo:$matchinfo),
+   (match (G_VSCALE $right, $imm),
+          (G_SUB $root, $x, $right, (MIFlags NoSWrap)),
+   [{ return Helper.matchSubOfVScale(${root}, ${matchinfo}); }]),
+   (apply [{ Helper.applyBuildFnMO(${root}, ${matchinfo}); }])>;
+
 // match_extract_of_element and insert_vector_elt_oob must be the first!
 def vector_ops_combines: GICombineGroup<[
 match_extract_of_element_undef_vector,
@@ -1630,7 +1661,11 @@ extract_vector_element_build_vector_trunc6,
 extract_vector_element_build_vector_trunc7,
 extract_vector_element_build_vector_trunc8,
 extract_vector_element_shuffle_vector,
-insert_vector_element_extract_vector_element
+insert_vector_element_extract_vector_element,
+add_of_vscale,
+mul_of_vscale,
+shl_of_vscale,
+sub_of_vscale,
 ]>;
 
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
index b4765fb280f9d..62ee80f49b7b6 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -6,7 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements CombinerHelper for G_EXTRACT_VECTOR_ELT.
+// This file implements CombinerHelper for G_EXTRACT_VECTOR_ELT,
+// G_INSERT_VECTOR_ELT, and G_VSCALE
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
@@ -400,3 +401,86 @@ bool CombinerHelper::matchInsertVectorElementOOB(MachineInstr &MI,
 
   return false;
 }
+
+bool CombinerHelper::matchAddOfVScale(const MachineOperand &MO,
+                                      BuildFnTy &MatchInfo) {
+  GAdd *Add = cast<GAdd>(MRI.getVRegDef(MO.getReg()));
+  GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Add->getLHSReg()));
+  GVScale *RHSVScale = cast<GVScale>(MRI.getVRegDef(Add->getRHSReg()));
+
+  Register Dst = Add->getReg(0);
+
+  if (!MRI.hasOneNonDBGUse(LHSVScale->getReg(0)) ||
+      !MRI.hasOneNonDBGUse(RHSVScale->getReg(0)))
+    return false;
+
+  MatchInfo = [=](MachineIRBuilder &B) {
+    B.buildVScale(Dst, LHSVScale->getSrc() + RHSVScale->getSrc());
+  };
+
+  return true;
+}
+
+bool CombinerHelper::matchMulOfVScale(const MachineOperand &MO,
+                                      BuildFnTy &MatchInfo) {
+  GMul *Mul = cast<GMul>(MRI.getVRegDef(MO.getReg()));
+  GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Mul->getLHSReg()));
+
+  std::optional<APInt> MaybeRHS = getIConstantVRegVal(Mul->getRHSReg(), MRI);
+  if (!MaybeRHS)
+    return false;
+
+  Register Dst = MO.getReg();
+
+  if (!MRI.hasOneNonDBGUse(LHSVScale->getReg(0)))
+    return false;
+
+  MatchInfo = [=](MachineIRBuilder &B) {
+    B.buildVScale(Dst, LHSVScale->getSrc() * *MaybeRHS);
+  };
+
+  return true;
+}
+
+bool CombinerHelper::matchSubOfVScale(const MachineOperand &MO,
+                                      BuildFnTy &MatchInfo) {
+  GSub *Sub = cast<GSub>(MRI.getVRegDef(MO.getReg()));
+  GVScale *RHSVScale = cast<GVScale>(MRI.getVRegDef(Sub->getRHSReg()));
+
+  Register Dst = MO.getReg();
+  LLT DstTy = MRI.getType(Dst);
+
+  if (!MRI.hasOneNonDBGUse(RHSVScale->getReg(0)) ||
+      !isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, DstTy}))
+    return false;
+
+  MatchInfo = [=](MachineIRBuilder &B) {
+    auto VScale = B.buildVScale(DstTy, -RHSVScale->getSrc());
+    B.buildAdd(Dst, Sub->getLHSReg(), VScale, Sub->getFlags());
+  };
+
+  return true;
+}
+
+bool CombinerHelper::matchShlOfVScale(const MachineOperand &MO,
+                                      BuildFnTy &MatchInfo) {
+  GSHL *Shl = cast<GSHL>(MRI.getVRegDef(MO.getReg()));
+  GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Shl->getSrcReg()));
+
+  std::optional<APInt> MaybeRHS = getIConstantVRegVal(Shl->getShiftReg(), MRI);
+  if (!MaybeRHS)
+    return false;
+
+  Register Dst = MO.getReg();
+  LLT DstTy = MRI.getType(Dst);
+
+  if (!MRI.hasOneNonDBGUse(LHSVScale->getReg(0)) ||
+      !isLegalOrBeforeLegalizer({TargetOpcode::G_VSCALE, DstTy}))
+    return false;
+
+  MatchInfo = [=](MachineIRBuilder &B) {
+    B.buildVScale(Dst, LHSVScale->getSrc().shl(*MaybeRHS));
+  };
+
+  return true;
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-vscale.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-vscale.mir
new file mode 100644
index 0000000000000..9b7a44954afdb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-vscale.mir
@@ -0,0 +1,113 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - -mtriple=aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s | FileCheck %s
+
+...
+---
+name:            sum_of_vscale
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: sum_of_vscale
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %sum:_(s64) = G_VSCALE i64 20
+    ; CHECK-NEXT: $x0 = COPY %sum(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %rhs:_(s64) = G_VSCALE i64 11
+    %lhs:_(s64) = G_VSCALE i64 9
+    %sum:_(s64) = nsw G_ADD %lhs(s64), %rhs(s64)
+    $x0 = COPY %sum(s64)
+    RET_ReallyLR implicit $x0
+...
+---
+name:            sum_of_vscale_multi_use
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: sum_of_vscale_multi_use
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %rhs:_(s64) = G_VSCALE i64 11
+    ; CHECK-NEXT: %lhs:_(s64) = G_VSCALE i64 9
+    ; CHECK-NEXT: %sum:_(s64) = nsw G_ADD %lhs, %rhs
+    ; CHECK-NEXT: $x0 = COPY %sum(s64)
+    ; CHECK-NEXT: $x1 = COPY %rhs(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %rhs:_(s64) = G_VSCALE i64 11
+    %lhs:_(s64) = G_VSCALE i64 9
+    %sum:_(s64) = nsw G_ADD %lhs(s64), %rhs(s64)
+    $x0 = COPY %sum(s64)
+    $x1 = COPY %rhs(s64)
+    RET_ReallyLR implicit $x0
+...
+---
+name:            mul_of_vscale
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: mul_of_vscale
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %mul:_(s64) = G_VSCALE i64 99
+    ; CHECK-NEXT: $x0 = COPY %mul(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %rhs:_(s64) = G_CONSTANT i64 11
+    %lhs:_(s64) = G_VSCALE i64 9
+    %mul:_(s64) = nsw G_MUL %lhs(s64), %rhs(s64)
+    $x0 = COPY %mul(s64)
+    RET_ReallyLR implicit $x0
+...
+---
+name:            sub_of_vscale
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: sub_of_vscale
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[VSCALE:%[0-9]+]]:_(s64) = G_VSCALE i64 -9
+    ; CHECK-NEXT: %sub:_(s64) = nsw G_ADD %x, [[VSCALE]]
+    ; CHECK-NEXT: $x0 = COPY %sub(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %x:_(s64) = COPY $x0
+    %rhs:_(s64) = G_VSCALE i64 9
+    %sub:_(s64) = nsw G_SUB %x(s64), %rhs(s64)
+    $x0 = COPY %sub(s64)
+    RET_ReallyLR implicit $x0
+...
+---
+name:            shl_of_vscale
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: shl_of_vscale
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %shl:_(s64) = G_VSCALE i64 44
+    ; CHECK-NEXT: $x0 = COPY %shl(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %rhs:_(s64) = G_CONSTANT i64 2
+    %lhs:_(s64) = G_VSCALE i64 11
+    %shl:_(s64) = nsw G_SHL %lhs(s64), %rhs(s64)
+    $x0 = COPY %shl(s64)
+    RET_ReallyLR implicit $x0
+...
+---
+name:            shl_of_vscale_wrong_flag
+body:             |
+  bb.1:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: shl_of_vscale_wrong_flag
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %rhs:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: %lhs:_(s64) = G_VSCALE i64 11
+    ; CHECK-NEXT: %shl:_(s64) = nuw G_SHL %lhs, %rhs(s64)
+    ; CHECK-NEXT: $x0 = COPY %shl(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %rhs:_(s64) = G_CONSTANT i64 2
+    %lhs:_(s64) = G_VSCALE i64 11
+    %shl:_(s64) = nuw G_SHL %lhs(s64), %rhs(s64)
+    $x0 = COPY %shl(s64)
+    RET_ReallyLR implicit $x0

@llvmbot (Member) commented Jun 1, 2024

@llvm/pr-subscribers-backend-aarch64

tschuett requested review from aemerson and arsenm on June 1, 2024
Contributor commented on lines +413 to +414:

    if (!MRI.hasOneNonDBGUse(LHSVScale->getReg(0)) ||
        !MRI.hasOneNonDBGUse(RHSVScale->getReg(0)))

Does the HasOneUse predicate added in d0dc29c need additional wiring to be used in combines?

tschuett (Author) replied:
From a brief reading, it seems to be limited to instruction emitters and cannot be used in the combiner.
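
Concretely, the matchers in this patch enforce the single-use requirement by hand; the guard below is the one from matchAddOfVScale in the diff above:

    // Refuse the fold unless both intermediate G_VSCALE results have exactly
    // one non-debug user; otherwise the original instructions would survive
    // and the combine would add code instead of removing it.
    if (!MRI.hasOneNonDBGUse(LHSVScale->getReg(0)) ||
        !MRI.hasOneNonDBGUse(RHSVScale->getReg(0)))
      return false;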

Thorsten Schütt added 2 commits June 1, 2024 13:00: "We need them for scalable address calculation and legal scalable addressing modes."
@arsenm (Contributor) left a comment:
Would be helped by a hasOneUse operator
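
Until such an operator is wired up for combines, the repeated checks could be factored into a small helper; a hypothetical sketch (the helper name and signature are invented here, not existing API):

    #include "llvm/ADT/STLExtras.h"               // llvm::all_of
    #include "llvm/CodeGen/MachineRegisterInfo.h" // MachineRegisterInfo, Register
    #include <initializer_list>

    // Hypothetical convenience wrapper: true iff every register in Regs has
    // exactly one non-debug use in MRI.
    static bool allHaveOneNonDBGUse(const llvm::MachineRegisterInfo &MRI,
                                    std::initializer_list<llvm::Register> Regs) {
      return llvm::all_of(Regs, [&](llvm::Register R) {
        return MRI.hasOneNonDBGUse(R);
      });
    }

With it, the guard in matchAddOfVScale would shrink to a single call:

    if (!allHaveOneNonDBGUse(MRI, {LHSVScale->getReg(0), RHSVScale->getReg(0)}))
      return false;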

tschuett merged commit 9b692e5 into llvm:main on Jun 6, 2024
7 checks passed
tschuett deleted the gisel-vscale branch on June 6, 2024 at 10:22