Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 16 additions & 6 deletions llvm/include/llvm/Analysis/LoopAccessAnalysis.h
Original file line number Diff line number Diff line change
Expand Up @@ -183,10 +183,12 @@ class MemoryDepChecker {
MemoryDepChecker(PredicatedScalarEvolution &PSE, AssumptionCache *AC,
DominatorTree *DT, const Loop *L,
const DenseMap<Value *, const SCEV *> &SymbolicStrides,
unsigned MaxTargetVectorWidthInBits)
unsigned MaxTargetVectorWidthInBits,
std::optional<ScalarEvolution::LoopGuards> &LoopGuards)
: PSE(PSE), AC(AC), DT(DT), InnermostLoop(L),
SymbolicStrides(SymbolicStrides),
MaxTargetVectorWidthInBits(MaxTargetVectorWidthInBits) {}
MaxTargetVectorWidthInBits(MaxTargetVectorWidthInBits),
LoopGuards(LoopGuards) {}

/// Register the location (instructions are given increasing numbers)
/// of a write access.
Expand Down Expand Up @@ -373,7 +375,7 @@ class MemoryDepChecker {
PointerBounds;

/// Cache for the loop guards of InnermostLoop.
std::optional<ScalarEvolution::LoopGuards> LoopGuards;
std::optional<ScalarEvolution::LoopGuards> &LoopGuards;

/// Check whether there is a plausible dependence between the two
/// accesses.
Expand Down Expand Up @@ -531,8 +533,9 @@ class RuntimePointerChecking {
AliasSetId(AliasSetId), Expr(Expr), NeedsFreeze(NeedsFreeze) {}
};

RuntimePointerChecking(MemoryDepChecker &DC, ScalarEvolution *SE)
: DC(DC), SE(SE) {}
RuntimePointerChecking(MemoryDepChecker &DC, ScalarEvolution *SE,
std::optional<ScalarEvolution::LoopGuards> &LoopGuards)
: DC(DC), SE(SE), LoopGuards(LoopGuards) {}

/// Reset the state of the pointer runtime information.
void reset() {
Expand Down Expand Up @@ -646,6 +649,9 @@ class RuntimePointerChecking {
/// Holds a pointer to the ScalarEvolution analysis.
ScalarEvolution *SE;

/// Cache for the loop guards of the loop.
std::optional<ScalarEvolution::LoopGuards> &LoopGuards;

/// Set of run-time checks required to establish independence of
/// otherwise may-aliasing pointers in the loop.
SmallVector<RuntimePointerCheck, 4> Checks;
Expand Down Expand Up @@ -821,6 +827,9 @@ class LoopAccessInfo {

Loop *TheLoop;

/// Cache for the loop guards of TheLoop.
std::optional<ScalarEvolution::LoopGuards> LoopGuards;

/// Determines whether we should generate partial runtime checks when not all
/// memory accesses could be analyzed.
bool AllowPartial;
Expand Down Expand Up @@ -938,7 +947,8 @@ LLVM_ABI std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
const SCEV *MaxBTC, ScalarEvolution *SE,
DenseMap<std::pair<const SCEV *, Type *>,
std::pair<const SCEV *, const SCEV *>> *PointerBounds,
DominatorTree *DT, AssumptionCache *AC);
DominatorTree *DT, AssumptionCache *AC,
std::optional<ScalarEvolution::LoopGuards> &LoopGuards);

class LoopAccessInfoManager {
/// The cache.
Expand Down
28 changes: 11 additions & 17 deletions llvm/lib/Analysis/Loads.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,6 @@

using namespace llvm;

static cl::opt<bool>
UseSymbolicMaxBTCForDerefInLoop("use-symbolic-maxbtc-deref-loop",
cl::init(false));

static bool isAligned(const Value *Base, Align Alignment,
const DataLayout &DL) {
return Base->getPointerAlignment(DL) >= Alignment;
Expand Down Expand Up @@ -335,18 +331,10 @@ bool llvm::isDereferenceableAndAlignedInLoop(
: SE.getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(MaxBECount))
return false;

if (isa<SCEVCouldNotCompute>(BECount) && !UseSymbolicMaxBTCForDerefInLoop) {
// TODO: Support symbolic max backedge taken counts for loops without
// computable backedge taken counts.
MaxBECount =
Predicates
? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
: SE.getConstantMaxBackedgeTakenCount(L);
}

const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
L, PtrScev, LI->getType(), BECount, MaxBECount, &SE, nullptr, &DT, AC);
std::optional<ScalarEvolution::LoopGuards> LoopGuards;
const auto &[AccessStart, AccessEnd] =
getStartAndEndForAccess(L, PtrScev, LI->getType(), BECount, MaxBECount,
&SE, nullptr, &DT, AC, LoopGuards);
if (isa<SCEVCouldNotCompute>(AccessStart) ||
isa<SCEVCouldNotCompute>(AccessEnd))
return false;
Expand All @@ -355,7 +343,13 @@ bool llvm::isDereferenceableAndAlignedInLoop(
const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
if (isa<SCEVCouldNotCompute>(PtrDiff))
return false;
APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);

if (!LoopGuards)
LoopGuards.emplace(
ScalarEvolution::LoopGuards::collect(AddRec->getLoop(), SE));

APInt MaxPtrDiff =
SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, *LoopGuards));

Value *Base = nullptr;
APInt AccessSize;
Expand Down
66 changes: 39 additions & 27 deletions llvm/lib/Analysis/LoopAccessAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -193,30 +193,28 @@ RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// return nullptr. \p A and \p B must have the same type.
static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
ScalarEvolution &SE,
const Instruction *CtxI) {
if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B, CtxI))
ScalarEvolution &SE) {
if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
return nullptr;
return SE.getAddExpr(A, B);
}

/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// return nullptr. \p A and \p B must have the same type.
static const SCEV *mulSCEVOverflow(const SCEV *A, const SCEV *B,
ScalarEvolution &SE,
const Instruction *CtxI) {
if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B, CtxI))
ScalarEvolution &SE) {
if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
return nullptr;
return SE.getMulExpr(A, B);
}

/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
/// \p MaxBTC is guaranteed inbounds of the accessed object.
static bool
evaluatePtrAddRecAtMaxBTCWillNotWrap(const SCEVAddRecExpr *AR,
const SCEV *MaxBTC, const SCEV *EltSize,
ScalarEvolution &SE, const DataLayout &DL,
DominatorTree *DT, AssumptionCache *AC) {
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
AssumptionCache *AC,
std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
auto *PointerBase = SE.getPointerBase(AR->getStart());
auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
if (!StartPtr)
Expand All @@ -234,12 +232,11 @@ evaluatePtrAddRecAtMaxBTCWillNotWrap(const SCEVAddRecExpr *AR,
Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);

// Context which dominates the entire loop.
auto *CtxI = L->getLoopPredecessor()->getTerminator();
// Check if we have a suitable dereferencable assumption we can use.
if (!StartPtrV->canBeFreed()) {
RetainedKnowledge DerefRK = getKnowledgeValidInContext(
StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT);
StartPtrV, {Attribute::Dereferenceable}, *AC,
L->getLoopPredecessor()->getTerminator(), DT);
if (DerefRK) {
DerefBytesSCEV = SE.getUMaxExpr(
DerefBytesSCEV, SE.getConstant(WiderTy, DerefRK.ArgValue));
Expand All @@ -263,23 +260,36 @@ evaluatePtrAddRecAtMaxBTCWillNotWrap(const SCEVAddRecExpr *AR,
SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);

const SCEV *OffsetAtLastIter =
mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE, CtxI);
if (!OffsetAtLastIter)
return false;
mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
if (!OffsetAtLastIter) {
// Re-try with constant max backedge-taken count if using the symbolic one
// failed.
MaxBTC = SE.getNoopOrZeroExtend(
SE.getConstantMaxBackedgeTakenCount(AR->getLoop()), WiderTy);
OffsetAtLastIter =
mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
Comment on lines +265 to +270
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There are some cases where we cannot apply loop guards effectively to MaxBTC (if it is an AddRec), but constant BTC computation uses them to get a tighter upper bound.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why not use the CtxI above as done previously? Would that, along with the mulSCEVOverflow change, allow us to get what's needed?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unfortunately that's not enough in some cases, for example https://github.com/llvm/llvm-project/blob/main/llvm/test/Transforms/LoopVectorize/single_early_exit.ll#L333 where the trip count of the inner loop depends on an induction from the outer loop

if (!OffsetAtLastIter)
return false;
}

const SCEV *OffsetEndBytes = addSCEVNoOverflow(
OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE, CtxI);
OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
if (!OffsetEndBytes)
return false;

if (IsKnownNonNegative) {
// For positive steps, check if
// (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
// while making sure none of the computations unsigned wrap themselves.
const SCEV *EndBytes =
addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE, CtxI);
const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
if (!EndBytes)
return false;

if (!LoopGuards)
LoopGuards.emplace(
ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));

EndBytes = SE.applyLoopGuards(EndBytes, *LoopGuards);
return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
}

Expand All @@ -296,7 +306,8 @@ std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
const SCEV *MaxBTC, ScalarEvolution *SE,
DenseMap<std::pair<const SCEV *, Type *>,
std::pair<const SCEV *, const SCEV *>> *PointerBounds,
DominatorTree *DT, AssumptionCache *AC) {
DominatorTree *DT, AssumptionCache *AC,
std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
if (PointerBounds) {
auto [Iter, Ins] = PointerBounds->insert(
Expand Down Expand Up @@ -332,7 +343,7 @@ std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
// separately checks that accesses cannot not wrap, so unsigned max
// represents an upper bound.
if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
DT, AC)) {
DT, AC, LoopGuards)) {
ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
} else {
ScEnd = SE->getAddExpr(
Expand Down Expand Up @@ -381,7 +392,7 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
const SCEV *BTC = PSE.getBackedgeTakenCount();
const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
&DC.getPointerBounds(), DC.getDT(), DC.getAC());
&DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
assert(!isa<SCEVCouldNotCompute>(ScStart) &&
!isa<SCEVCouldNotCompute>(ScEnd) &&
"must be able to compute both start and end expressions");
Expand Down Expand Up @@ -1987,13 +1998,13 @@ bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
ScalarEvolution &SE = *PSE.getSE();
const auto &[SrcStart_, SrcEnd_] =
getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
&SE, &PointerBounds, DT, AC);
&SE, &PointerBounds, DT, AC, LoopGuards);
if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
return false;

const auto &[SinkStart_, SinkEnd_] =
getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
&SE, &PointerBounds, DT, AC);
&SE, &PointerBounds, DT, AC, LoopGuards);
if (isa<SCEVCouldNotCompute>(SinkStart_) ||
isa<SCEVCouldNotCompute>(SinkEnd_))
return false;
Expand Down Expand Up @@ -3040,8 +3051,9 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;

DepChecker = std::make_unique<MemoryDepChecker>(
*PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits);
PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
*PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
PtrRtChecking =
std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
if (canAnalyzeLoop())
CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}
Expand Down
14 changes: 3 additions & 11 deletions llvm/lib/Analysis/ScalarEvolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2338,23 +2338,15 @@ bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
// Can we use context to prove the fact we need?
if (!CtxI)
return false;
// TODO: Support mul.
if (BinOp == Instruction::Mul)
return false;
auto *RHSC = dyn_cast<SCEVConstant>(RHS);
// TODO: Lift this limitation.
if (!RHSC)
return false;
APInt C = RHSC->getAPInt();
unsigned NumBits = C.getBitWidth();
if (BinOp == Instruction::Mul) {
// Multiplying by 0 or 1 never overflows
if (C.isZero() || C.isOne())
return true;
if (Signed)
return false;
APInt Limit = APInt::getMaxValue(NumBits).udiv(C);
// To avoid overflow, we need to make sure that LHS <= MAX / C.
return isKnownPredicateAt(ICmpInst::ICMP_ULE, LHS, getConstant(Limit),
CtxI);
}
Comment on lines -2347 to -2357
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure if we should leave the code in; for the current test cases, we get the expected results due to #155300

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I had some cases downstream where I needed this code. I'll check if #155300 allows us to get the test cases, but I think we can remove the code unless it is exercised.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That would be great. If there are such cases, could you add them?

bool IsSub = (BinOp == Instruction::Sub);
bool IsNegativeConst = (Signed && C.isNegative());
// Compute the direction and magnitude by which we need to check overflow.
Expand Down
2 changes: 1 addition & 1 deletion llvm/test/Transforms/LoopVectorize/vect.stats.ll
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -force-vector-width=4 -debug-only=loop-vectorize -enable-early-exit-vectorization -use-symbolic-maxbtc-deref-loop --disable-output -stats -S 2>&1 | FileCheck %s
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -force-vector-width=4 -debug-only=loop-vectorize -enable-early-exit-vectorization --disable-output -stats -S 2>&1 | FileCheck %s
; REQUIRES: asserts

; We have 3 loops, two of them are vectorizable (with one being early-exit
Expand Down