@@ -1715,18 +1715,22 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
   State.set(this, V);
 }
 
-InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
-                                                     VPCostContext &Ctx) const {
+/// Compute the cost for the intrinsic \p ID with \p Operands, produced by \p R.
+static InstructionCost getCostForIntrinsics(Intrinsic::ID ID,
+                                            ArrayRef<const VPValue *> Operands,
+                                            const VPRecipeWithIRFlags &R,
+                                            ElementCount VF,
+                                            VPCostContext &Ctx) {
   // Some backends analyze intrinsic arguments to determine cost. Use the
   // underlying value for the operand if it has one. Otherwise try to use the
   // operand of the underlying call instruction, if there is one. Otherwise
   // clear Arguments.
   // TODO: Rework TTI interface to be independent of concrete IR values.
   SmallVector<const Value *> Arguments;
-  for (const auto &[Idx, Op] : enumerate(operands())) {
+  for (const auto &[Idx, Op] : enumerate(Operands)) {
     auto *V = Op->getUnderlyingValue();
     if (!V) {
-      if (auto *UI = dyn_cast_or_null<CallBase>(getUnderlyingValue())) {
+      if (auto *UI = dyn_cast_or_null<CallBase>(R.getUnderlyingValue())) {
         Arguments.push_back(UI->getArgOperand(Idx));
         continue;
       }
@@ -1736,21 +1740,31 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
     Arguments.push_back(V);
   }
 
-  Type *RetTy = toVectorizedTy(Ctx.Types.inferScalarType(this), VF);
+  Type *ScalarRetTy = Ctx.Types.inferScalarType(&R);
+  Type *RetTy = VF.isVector() ? toVectorizedTy(ScalarRetTy, VF) : ScalarRetTy;
   SmallVector<Type *> ParamTys;
-  for (unsigned I = 0; I != getNumOperands(); ++I)
-    ParamTys.push_back(
-        toVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
+  for (const VPValue *Op : Operands) {
+    ParamTys.push_back(VF.isVector()
+                           ? toVectorTy(Ctx.Types.inferScalarType(Op), VF)
+                           : Ctx.Types.inferScalarType(Op));
+  }
 
   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
-  FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
+  FastMathFlags FMF =
+      R.hasFastMathFlags() ? R.getFastMathFlags() : FastMathFlags();
   IntrinsicCostAttributes CostAttrs(
-      VectorIntrinsicID, RetTy, Arguments, ParamTys, FMF,
-      dyn_cast_or_null<IntrinsicInst>(getUnderlyingValue()),
+      ID, RetTy, Arguments, ParamTys, FMF,
+      dyn_cast_or_null<IntrinsicInst>(R.getUnderlyingValue()),
       InstructionCost::getInvalid(), &Ctx.TLI);
   return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, Ctx.CostKind);
 }
 
+InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
+                                                     VPCostContext &Ctx) const {
+  SmallVector<const VPValue *> ArgOps(operands());
+  return getCostForIntrinsics(VectorIntrinsicID, ArgOps, *this, VF, Ctx);
+}
+
 StringRef VPWidenIntrinsicRecipe::getIntrinsicName() const {
   return Intrinsic::getBaseName(VectorIntrinsicID);
 }