32 changes: 23 additions & 9 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3965,7 +3965,7 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks(
[](const auto *R) { return Instruction::Select; })
.Case<VPWidenStoreRecipe>(
[](const auto *R) { return Instruction::Store; })
.Case<VPWidenLoadRecipe>(
.Case<VPWidenLoadRecipe, VPWidenStridedLoadRecipe>(
[](const auto *R) { return Instruction::Load; })
.Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
[](const auto *R) { return Instruction::Call; })
@@ -4065,6 +4065,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
case VPDef::VPReductionPHISC:
case VPDef::VPInterleaveEVLSC:
case VPDef::VPInterleaveSC:
case VPDef::VPWidenStridedLoadSC:
case VPDef::VPWidenLoadEVLSC:
case VPDef::VPWidenLoadSC:
case VPDef::VPWidenStoreEVLSC:
@@ -6925,6 +6926,12 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan,
RepR->getUnderlyingInstr(), VF))
return true;
}

// The strided load is created from a gather by a VPlan transform, and
// its cost will be lower than that of the original gather.
if (isa<VPWidenStridedLoadRecipe>(&R))
return true;

if (Instruction *UI = GetInstructionForCost(&R)) {
// If we adjusted the predicate of the recipe, the cost in the legacy
// cost model may be different.
@@ -7551,7 +7558,10 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
new VPVectorEndPointerRecipe(Ptr, &Plan.getVF(), getLoadStoreType(I),
/*Stride*/ -1, Flags, I->getDebugLoc());
} else {
VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
const DataLayout &DL = I->getDataLayout();
auto *StrideTy = DL.getIndexType(Ptr->getUnderlyingValue()->getType());
VPValue *StrideOne = Plan.getOrAddLiveIn(ConstantInt::get(StrideTy, 1));
VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), StrideOne,
GEP ? GEP->getNoWrapFlags()
: GEPNoWrapFlags::none(),
I->getDebugLoc());
@@ -8646,19 +8656,14 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
*Plan))
return nullptr;

VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
// Transform recipes to abstract recipes if it is legal and beneficial and
// clamp the range for better cost estimation.
// TODO: Enable the following transform once the EVL versions of
// extended-reduction and mulacc-reduction are implemented.
if (!CM.foldTailWithEVL()) {
VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind);
if (!CM.foldTailWithEVL())
VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
CostCtx, Range);
}

for (ElementCount VF : Range)
Plan->addVF(VF);
Plan->setName("Initial VPlan");

// Interleave memory: for each Interleave Group we marked earlier as relevant
// for this VPlan, replace the Recipes widening its memory instructions with a
@@ -8671,6 +8676,15 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
VPlanTransforms::runPass(VPlanTransforms::replaceSymbolicStrides, *Plan, PSE,
Legal->getLAI()->getSymbolicStrides());

// Convert memory recipes to strided access recipes if the strided access is
// legal and profitable.
VPlanTransforms::runPass(VPlanTransforms::convertToStridedAccesses, *Plan,
CostCtx, Range);

for (ElementCount VF : Range)
Plan->addVF(VF);
Plan->setName("Initial VPlan");

auto BlockNeedsPredication = [this](BasicBlock *BB) {
return Legal->blockNeedsPredication(BB);
};
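For context, a minimal sketch of the access pattern this patch targets (the function below is illustrative and not part of the patch): a load whose address advances by a constant number of elements per iteration. Before this change such a load was widened into a gather; with it, a VPlan transform can instead emit one strided load per vector iteration.

// Hypothetical example, assuming a target (e.g. RISC-V with +v) that
// supports llvm.experimental.vp.strided.load. The load of a[3 * i]
// advances by a constant stride of 3 * sizeof(float) bytes, so it can
// be lowered to a strided load taking the base pointer plus a scalar
// byte stride, instead of a gather that materializes every lane's
// address in a vector.
float sum_every_third(const float *a, int n) {
  float sum = 0.0f;
  for (int i = 0; i < n; ++i)
    sum += a[3 * i];
  return sum;
}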
95 changes: 83 additions & 12 deletions llvm/lib/Transforms/Vectorize/VPlan.h
@@ -563,6 +563,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
case VPRecipeBase::VPInterleaveEVLSC:
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPIRInstructionSC:
case VPRecipeBase::VPWidenStridedLoadSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
@@ -1764,10 +1765,6 @@ struct LLVM_ABI_FOR_TEST VPWidenSelectRecipe : public VPRecipeWithIRFlags,

/// A recipe for handling GEP instructions.
class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {
bool isPointerLoopInvariant() const {
return getOperand(0)->isDefinedOutsideLoopRegions();
}

bool isIndexLoopInvariant(unsigned I) const {
return getOperand(I + 1)->isDefinedOutsideLoopRegions();
}
@@ -1796,6 +1793,30 @@ class LLVM_ABI_FOR_TEST VPWidenGEPRecipe : public VPRecipeWithIRFlags {

VP_CLASSOF_IMPL(VPDef::VPWidenGEPSC)

bool isPointerLoopInvariant() const {
return getOperand(0)->isDefinedOutsideLoopRegions();
}

std::optional<unsigned> getUniqueVariantIndex() const {
std::optional<unsigned> VarIdx;
for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
if (isIndexLoopInvariant(I))
continue;

if (VarIdx)
return std::nullopt;
VarIdx = I;
}
return VarIdx;
}

Type *getIndexedType(unsigned I) const {
auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
Type *SourceElementType = GEP->getSourceElementType();
SmallVector<Value *, 4> Ops(GEP->idx_begin(), GEP->idx_begin() + I);
return GetElementPtrInst::getIndexedType(SourceElementType, Ops);
}

/// Generate the gep nodes.
void execute(VPTransformState &State) override;

@@ -1884,20 +1905,23 @@ class VPVectorEndPointerRecipe : public VPRecipeWithIRFlags,
#endif
};

/// A recipe to compute the pointers for widened memory accesses of IndexTy.
/// A recipe to compute the pointers for widened memory accesses of IndexedTy,
/// with the Stride expressed in units of IndexedTy.
class VPVectorPointerRecipe : public VPRecipeWithIRFlags,
public VPUnrollPartAccessor<1> {
public VPUnrollPartAccessor<2> {
Type *IndexedTy;

public:
VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, GEPNoWrapFlags GEPFlags,
DebugLoc DL)
: VPRecipeWithIRFlags(VPDef::VPVectorPointerSC, ArrayRef<VPValue *>(Ptr),
GEPFlags, DL),
VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, VPValue *Stride,
GEPNoWrapFlags GEPFlags, DebugLoc DL)
: VPRecipeWithIRFlags(VPDef::VPVectorPointerSC,
ArrayRef<VPValue *>({Ptr, Stride}), GEPFlags, DL),
IndexedTy(IndexedTy) {}

VP_CLASSOF_IMPL(VPDef::VPVectorPointerSC)

VPValue *getStride() const { return getOperand(1); }

void execute(VPTransformState &State) override;

bool onlyFirstLaneUsed(const VPValue *Op) const override {
@@ -1915,7 +1939,7 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags,
}

VPVectorPointerRecipe *clone() override {
return new VPVectorPointerRecipe(getOperand(0), IndexedTy,
return new VPVectorPointerRecipe(getOperand(0), IndexedTy, getStride(),
getGEPNoWrapFlags(), getDebugLoc());
}

@@ -3156,7 +3180,8 @@ class LLVM_ABI_FOR_TEST VPWidenMemoryRecipe : public VPRecipeBase,
return R->getVPDefID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPDefID() == VPRecipeBase::VPWidenLoadEVLSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreEVLSC;
R->getVPDefID() == VPRecipeBase::VPWidenStoreEVLSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStridedLoadSC;
}

static inline bool classof(const VPUser *U) {
@@ -3277,6 +3302,52 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
}
};

/// A recipe for strided load operations, using the base address, stride, and an
/// optional mask. This recipe will generate a vp.strided.load intrinsic call
/// to represent memory accesses with a fixed stride.
struct VPWidenStridedLoadRecipe final : public VPWidenMemoryRecipe,
public VPValue {
VPWidenStridedLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Stride,
VPValue *VF, VPValue *Mask,
const VPIRMetadata &Metadata, DebugLoc DL)
: VPWidenMemoryRecipe(
VPDef::VPWidenStridedLoadSC, Load, {Addr, Stride, VF},
/*Consecutive=*/false, /*Reverse=*/false, Metadata, DL),
VPValue(this, &Load) {
setMask(Mask);
}

VPWidenStridedLoadRecipe *clone() override {
return new VPWidenStridedLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
getStride(), getVF(), getMask(), *this,
getDebugLoc());
}

VP_CLASSOF_IMPL(VPDef::VPWidenStridedLoadSC);

/// Return the stride operand.
VPValue *getStride() const { return getOperand(1); }

/// Return the VF operand.
VPValue *getVF() const { return getOperand(2); }

/// Generate a strided load.
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return Op == getAddr() || Op == getStride() || Op == getVF();
}
};

/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct LLVM_ABI_FOR_TEST VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
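A hedged sketch of how a transform can combine the VPWidenGEPRecipe helpers made public above (isPointerLoopInvariant, getUniqueVariantIndex, and getIndexedType are from the patch; the wrapper function is hypothetical): an address is a strided-access candidate when its base pointer is loop-invariant and exactly one GEP index varies in the loop, and each unit step of that index then advances the address by the alloc size of the type indexed at that position.

// Hypothetical helper, assuming the VPWidenGEPRecipe API added above and
// that the variant index itself advances by one per iteration.
static bool getConstantByteStride(const VPWidenGEPRecipe *GEP,
                                  const DataLayout &DL,
                                  uint64_t &StrideBytes) {
  if (!GEP->isPointerLoopInvariant())
    return false;
  // More than one variant index means the address is not a single
  // affine stride sequence.
  std::optional<unsigned> VarIdx = GEP->getUniqueVariantIndex();
  if (!VarIdx)
    return false;
  // Each unit step of the variant index moves the pointer by the alloc
  // size of the type indexed at that position.
  Type *SteppedTy = GEP->getIndexedType(*VarIdx);
  StrideBytes = DL.getTypeAllocSize(SteppedTy).getFixedValue();
  return true;
}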
6 changes: 4 additions & 2 deletions llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -187,8 +187,10 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}

Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
assert((isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(R)) &&
"Store recipes should not define any values");
assert(
(isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
R)) &&
"Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}

72 changes: 65 additions & 7 deletions llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -82,6 +82,7 @@ bool VPRecipeBase::mayWriteToMemory() const {
case VPWidenCastSC:
case VPWidenGEPSC:
case VPWidenIntOrFpInductionSC:
case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenPHISC:
@@ -105,6 +106,7 @@ bool VPRecipeBase::mayReadFromMemory() const {
return cast<VPExpressionRecipe>(this)->mayReadOrWriteMemory();
case VPInstructionSC:
return cast<VPInstruction>(this)->opcodeMayReadOrWriteFromMemory();
case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
return true;
@@ -188,6 +190,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPInterleaveEVLSC:
case VPInterleaveSC:
return mayWriteToMemory();
case VPWidenStridedLoadSC:
case VPWidenLoadEVLSC:
case VPWidenLoadSC:
case VPWidenStoreEVLSC:
@@ -2566,13 +2569,22 @@ void VPVectorEndPointerRecipe::print(raw_ostream &O, const Twine &Indent,
void VPVectorPointerRecipe::execute(VPTransformState &State) {
auto &Builder = State.Builder;
unsigned CurrentPart = getUnrollPart(*this);
Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
/*IsUnitStride*/ true, CurrentPart, Builder);
Value *Stride = State.get(getStride(), /*IsScalar*/ true);

auto *StrideC = dyn_cast<ConstantInt>(Stride);
bool IsStrideOne = StrideC && StrideC->isOne();
bool IsUnitStride = IsStrideOne || (StrideC && StrideC->isMinusOne());
Type *IndexTy =
getGEPIndexTy(State.VF.isScalable(),
/*IsReverse*/ false, IsUnitStride, CurrentPart, Builder);
Value *Ptr = State.get(getOperand(0), VPLane(0));

Stride = Builder.CreateSExtOrTrunc(Stride, IndexTy);
Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
Value *Index = IsStrideOne ? Increment : Builder.CreateMul(Increment, Stride);

Value *ResultPtr =
Builder.CreateGEP(IndexedTy, Ptr, Increment, "", getGEPNoWrapFlags());
Builder.CreateGEP(IndexedTy, Ptr, Index, "", getGEPNoWrapFlags());

State.set(this, ResultPtr, /*IsScalar*/ true);
}
@@ -3281,9 +3293,11 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
const Align Alignment = getLoadStoreAlignment(&Ingredient);
unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
->getAddressSpace();
unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
? Instruction::Load
: Instruction::Store;
unsigned Opcode =
isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe, VPWidenStridedLoadRecipe>(
this)
? Instruction::Load
: Instruction::Store;

if (!Consecutive) {
// TODO: Using the original IR may not be accurate.
@@ -3293,8 +3307,11 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
"Inconsecutive memory access should not have the order.");

const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
Type *PtrTy = Ptr->getType();
if (isa<VPWidenStridedLoadRecipe>(this))
return Ctx.TTI.getStridedMemoryOpCost(
Opcode, Ty, Ptr, IsMasked, Alignment, Ctx.CostKind, &Ingredient);

Type *PtrTy = Ptr->getType();
// If the address value is uniform across all lanes, then the address can be
// calculated with scalar type and broadcast.
if (!vputils::isSingleScalar(getAddr()))
@@ -3449,6 +3466,47 @@ void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
}
#endif

void VPWidenStridedLoadRecipe::execute(VPTransformState &State) {
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);

auto &Builder = State.Builder;
Value *Addr = State.get(getAddr(), /*IsScalar*/ true);
Value *StrideInBytes = State.get(getStride(), /*IsScalar*/ true);
Value *Mask = nullptr;
if (VPValue *VPMask = getMask())
Mask = State.get(VPMask);
else
Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
Value *RunTimeVF = Builder.CreateZExtOrTrunc(State.get(getVF(), VPLane(0)),
Builder.getInt32Ty());

auto *PtrTy = Addr->getType();
auto *StrideTy = StrideInBytes->getType();
CallInst *NewLI = Builder.CreateIntrinsic(
Intrinsic::experimental_vp_strided_load, {DataTy, PtrTy, StrideTy},
{Addr, StrideInBytes, Mask, RunTimeVF}, nullptr, "wide.strided.load");
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
applyMetadata(*NewLI);
State.set(this, NewLI);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenStridedLoadRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
O << Indent << "WIDEN ";
printAsOperand(O, SlotTracker);
O << " = load ";
getAddr()->printAsOperand(O, SlotTracker);
O << ", stride = ";
getStride()->printAsOperand(O, SlotTracker);
O << ", runtimeVF = ";
getVF()->printAsOperand(O, SlotTracker);
}
#endif

void VPWidenStoreRecipe::execute(VPTransformState &State) {
VPValue *StoredVPValue = getStoredValue();
bool CreateScatter = !isConsecutive();
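Finally, a hedged sketch of the target gate the cost path above depends on (isLegalStridedLoadStore and getStridedMemoryOpCost are existing TargetTransformInfo hooks; the wrapper is illustrative, and a real caller would pass the pointer operand rather than nullptr): the gather-to-strided conversion should only fire when the target supports strided memory operations and the strided cost is strictly lower than the gather cost.

// Illustrative profitability check mirroring the computeCost change above.
static bool preferStridedLoad(const TargetTransformInfo &TTI, Type *VecTy,
                              Align Alignment, bool IsMasked,
                              TTI::TargetCostKind CostKind,
                              InstructionCost GatherCost) {
  // Bail out if the target has no native strided load/store support.
  if (!TTI.isLegalStridedLoadStore(VecTy, Alignment))
    return false;
  InstructionCost StridedCost = TTI.getStridedMemoryOpCost(
      Instruction::Load, VecTy, /*Ptr=*/nullptr, IsMasked, Alignment,
      CostKind);
  return StridedCost.isValid() && StridedCost < GatherCost;
}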