@@ -886,6 +886,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
     case VPRecipeBase::VPWidenCanonicalIVSC:
     case VPRecipeBase::VPWidenCastSC:
     case VPRecipeBase::VPWidenGEPSC:
+    case VPRecipeBase::VPWidenIntrinsicSC:
     case VPRecipeBase::VPWidenSC:
     case VPRecipeBase::VPWidenEVLSC:
     case VPRecipeBase::VPWidenSelectSC:
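The only functional change in this hunk is that VPWidenIntrinsicSC now satisfies VPSingleDefRecipe::classof. A minimal sketch of what that enables, assuming a VPRecipeBase &R taken from some VPlan traversal (not part of this patch):

// After this change, dyn_cast to VPSingleDefRecipe also succeeds for the
// new VPWidenIntrinsicRecipe, so generic single-def handling picks it up.
if (auto *SingleDef = dyn_cast<VPSingleDefRecipe>(&R))
  (void)SingleDef->getUnderlyingValue();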
@@ -1613,25 +1614,85 @@ class VPScalarCastRecipe : public VPSingleDefRecipe {
   }
 };

-/// A recipe for widening Call instructions.
-class VPWidenCallRecipe : public VPRecipeWithIRFlags {
-  /// ID of the vector intrinsic to call when widening the call. If set the
-  /// Intrinsic::not_intrinsic, a library call will be used instead.
+/// A recipe for widening vector intrinsics.
+class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
+  /// ID of the vector intrinsic to widen.
   Intrinsic::ID VectorIntrinsicID;
-  /// If this recipe represents a library call, Variant stores a pointer to
-  /// the chosen function. There is a 1:1 mapping between a given VF and the
-  /// chosen vectorized variant, so there will be a different vplan for each
-  /// VF with a valid variant.
+
+  /// Scalar return type of the intrinsic.
+  Type *ResultTy;
+
+  /// True if the intrinsic may read from memory.
+  bool MayReadFromMemory;
+
+  /// True if the intrinsic may write to memory.
+  bool MayWriteToMemory;
+
+  /// True if the intrinsic may have side-effects.
+  bool MayHaveSideEffects;
+
+public:
+  VPWidenIntrinsicRecipe(CallInst &CI, Intrinsic::ID VectorIntrinsicID,
+                         ArrayRef<VPValue *> CallArguments, Type *Ty,
+                         DebugLoc DL = {})
+      : VPRecipeWithIRFlags(VPDef::VPWidenIntrinsicSC, CallArguments, CI),
+        VectorIntrinsicID(VectorIntrinsicID), ResultTy(Ty),
+        MayReadFromMemory(CI.mayReadFromMemory()),
+        MayWriteToMemory(CI.mayWriteToMemory()),
+        MayHaveSideEffects(CI.mayHaveSideEffects()) {}
+
+  ~VPWidenIntrinsicRecipe() override = default;
+
+  VPWidenIntrinsicRecipe *clone() override {
+    return new VPWidenIntrinsicRecipe(*cast<CallInst>(getUnderlyingValue()),
+                                      VectorIntrinsicID, {op_begin(), op_end()},
+                                      ResultTy, getDebugLoc());
+  }
+
+  VP_CLASSOF_IMPL(VPDef::VPWidenIntrinsicSC)
+
+  /// Produce a widened version of the vector intrinsic.
+  void execute(VPTransformState &State) override;
+
+  /// Return the cost of this vector intrinsic.
+  InstructionCost computeCost(ElementCount VF,
+                              VPCostContext &Ctx) const override;
+
+  /// Return the scalar return type of the intrinsic.
+  Type *getResultType() const { return ResultTy; }
+
+  /// Return the name of the intrinsic as a string.
+  StringRef getIntrinsicName() const;
+
+  /// Returns true if the intrinsic may read from memory.
+  bool mayReadFromMemory() const { return MayReadFromMemory; }
+
+  /// Returns true if the intrinsic may write to memory.
+  bool mayWriteToMemory() const { return MayWriteToMemory; }
+
+  /// Returns true if the intrinsic may have side-effects.
+  bool mayHaveSideEffects() const { return MayHaveSideEffects; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
+/// A recipe for widening Call instructions using library calls.
+class VPWidenCallRecipe : public VPRecipeWithIRFlags {
+  /// Variant stores a pointer to the chosen function. There is a 1:1 mapping
+  /// between a given VF and the chosen vectorized variant, so there will be a
+  /// different VPlan for each VF with a valid variant.
   Function *Variant;

 public:
-  template <typename IterT>
-  VPWidenCallRecipe(Value *UV, iterator_range<IterT> CallArguments,
-                    Intrinsic::ID VectorIntrinsicID, DebugLoc DL = {},
-                    Function *Variant = nullptr)
+  VPWidenCallRecipe(Value *UV, Function *Variant,
+                    ArrayRef<VPValue *> CallArguments, DebugLoc DL = {})
       : VPRecipeWithIRFlags(VPDef::VPWidenCallSC, CallArguments,
                             *cast<Instruction>(UV)),
-        VectorIntrinsicID(VectorIntrinsicID), Variant(Variant) {
+        Variant(Variant) {
     assert(
         isa<Function>(getOperand(getNumOperands() - 1)->getLiveInIRValue()) &&
         "last operand must be the called function");
@@ -1640,8 +1701,8 @@ class VPWidenCallRecipe : public VPRecipeWithIRFlags {
   ~VPWidenCallRecipe() override = default;

   VPWidenCallRecipe *clone() override {
-    return new VPWidenCallRecipe(getUnderlyingValue(), operands(),
-                                 VectorIntrinsicID, getDebugLoc(), Variant);
+    return new VPWidenCallRecipe(getUnderlyingValue(), Variant,
+                                 {op_begin(), op_end()}, getDebugLoc());
   }

   VP_CLASSOF_IMPL(VPDef::VPWidenCallSC)
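With intrinsic widening split out, VPWidenCallRecipe only models vector library calls. A hedged usage sketch, again assuming hypothetical CI, VecFunc (the chosen Function *Variant), and Ops from the surrounding builder code; per the assert in the constructor, the last entry of Ops must be a live-in VPValue whose IR value is the called function:

// Library-call form after this change: the Function variant is passed up
// front and the intrinsic ID parameter is gone.
return new VPWidenCallRecipe(CI, VecFunc, Ops, CI->getDebugLoc());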