Skip to content

Commit e65ddca

Browse files
committed
Revert "[SLP] Vectorize jumbled stores."
This reverts commit 21d498c, because that commit causes crashes on some targets.
1 parent 9d4bbe8 commit e65ddca

File tree

3 files changed

+25
-98
lines changed

3 files changed

+25
-98
lines changed

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Lines changed: 16 additions & 91 deletions
Original file line number | Diff line number | Diff line change
@@ -2666,74 +2666,24 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
26662666
}
26672667
case Instruction::Store: {
26682668
// Check if the stores are consecutive or if we need to swizzle them.
2669-
llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
2670-
// Make sure all stores in the bundle are simple - we can't vectorize
2671-
// atomic or volatile stores.
2672-
SmallVector<Value *, 4> PointerOps(VL.size());
2673-
ValueList Operands(VL.size());
2674-
auto POIter = PointerOps.begin();
2675-
auto OIter = Operands.begin();
2676-
for (Value *V : VL) {
2677-
auto *SI = cast<StoreInst>(V);
2678-
if (!SI->isSimple()) {
2669+
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
2670+
if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
26792671
BS.cancelScheduling(VL, VL0);
26802672
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
26812673
ReuseShuffleIndicies);
2682-
LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
2674+
LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
26832675
return;
26842676
}
2685-
*POIter = SI->getPointerOperand();
2686-
*OIter = SI->getValueOperand();
2687-
++POIter;
2688-
++OIter;
2689-
}
26902677

2691-
OrdersType CurrentOrder;
2692-
// Check the order of pointer operands.
2693-
if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2694-
Value *Ptr0;
2695-
Value *PtrN;
2696-
if (CurrentOrder.empty()) {
2697-
Ptr0 = PointerOps.front();
2698-
PtrN = PointerOps.back();
2699-
} else {
2700-
Ptr0 = PointerOps[CurrentOrder.front()];
2701-
PtrN = PointerOps[CurrentOrder.back()];
2702-
}
2703-
const SCEV *Scev0 = SE->getSCEV(Ptr0);
2704-
const SCEV *ScevN = SE->getSCEV(PtrN);
2705-
const auto *Diff =
2706-
dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
2707-
uint64_t Size = DL->getTypeAllocSize(ScalarTy);
2708-
// Check that the sorted pointer operands are consecutive.
2709-
if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) {
2710-
if (CurrentOrder.empty()) {
2711-
// Original stores are consecutive and does not require reordering.
2712-
++NumOpsWantToKeepOriginalOrder;
2713-
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2714-
UserTreeIdx, ReuseShuffleIndicies);
2715-
TE->setOperandsInOrder();
2716-
buildTree_rec(Operands, Depth + 1, {TE, 0});
2717-
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
2718-
} else {
2719-
// Need to reorder.
2720-
auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
2721-
++(I->getSecond());
2722-
TreeEntry *TE =
2723-
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2724-
ReuseShuffleIndicies, I->getFirst());
2725-
TE->setOperandsInOrder();
2726-
buildTree_rec(Operands, Depth + 1, {TE, 0});
2727-
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
2728-
}
2729-
return;
2730-
}
2731-
}
2678+
TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2679+
ReuseShuffleIndicies);
2680+
LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
27322681

2733-
BS.cancelScheduling(VL, VL0);
2734-
newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2735-
ReuseShuffleIndicies);
2736-
LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
2682+
ValueList Operands;
2683+
for (Value *V : VL)
2684+
Operands.push_back(cast<Instruction>(V)->getOperand(0));
2685+
TE->setOperandsInOrder();
2686+
buildTree_rec(Operands, Depth + 1, {TE, 0});
27372687
return;
27382688
}
27392689
case Instruction::Call: {
@@ -3231,23 +3181,15 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
32313181
}
32323182
case Instruction::Store: {
32333183
// We know that we can merge the stores. Calculate the cost.
3234-
bool IsReorder = !E->ReorderIndices.empty();
3235-
auto *SI =
3236-
cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
3237-
MaybeAlign Alignment(SI->getAlignment());
3184+
MaybeAlign alignment(cast<StoreInst>(VL0)->getAlignment());
32383185
int ScalarEltCost =
3239-
TTI->getMemoryOpCost(Instruction::Store, ScalarTy, Alignment, 0, VL0);
3186+
TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
32403187
if (NeedToShuffleReuses) {
32413188
ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
32423189
}
32433190
int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
3244-
int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
3245-
VecTy, Alignment, 0, VL0);
3246-
if (IsReorder) {
3247-
// TODO: Merge this shuffle with the ReuseShuffleCost.
3248-
VecStCost += TTI->getShuffleCost(
3249-
TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3250-
}
3191+
int VecStCost =
3192+
TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
32513193
return ReuseShuffleCost + VecStCost - ScalarStCost;
32523194
}
32533195
case Instruction::Call: {
@@ -4109,22 +4051,13 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
41094051
return V;
41104052
}
41114053
case Instruction::Store: {
4112-
bool IsReorder = !E->ReorderIndices.empty();
4113-
auto *SI = cast<StoreInst>(
4114-
IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
4054+
StoreInst *SI = cast<StoreInst>(VL0);
41154055
unsigned Alignment = SI->getAlignment();
41164056
unsigned AS = SI->getPointerAddressSpace();
41174057

41184058
setInsertPointAfterBundle(E);
41194059

41204060
Value *VecValue = vectorizeTree(E->getOperand(0));
4121-
if (IsReorder) {
4122-
OrdersType Mask;
4123-
inversePermutation(E->ReorderIndices, Mask);
4124-
VecValue = Builder.CreateShuffleVector(
4125-
VecValue, UndefValue::get(VecValue->getType()), E->ReorderIndices,
4126-
"reorder_shuffle");
4127-
}
41284061
Value *ScalarPtr = SI->getPointerOperand();
41294062
Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
41304063
StoreInst *ST = Builder.CreateStore(VecValue, VecPtr);
@@ -5414,14 +5347,6 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
54145347
<< "\n");
54155348

54165349
R.buildTree(Chain);
5417-
Optional<ArrayRef<unsigned>> Order = R.bestOrder();
5418-
if (Order) {
5419-
// TODO: reorder tree nodes without tree rebuilding.
5420-
SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend());
5421-
llvm::transform(*Order, ReorderedOps.begin(),
5422-
[Chain](const unsigned Idx) { return Chain[Idx]; });
5423-
R.buildTree(ReorderedOps);
5424-
}
54255350
if (R.isTreeTinyAndNotFullyVectorizable())
54265351
return false;
54275352

llvm/test/Transforms/SLPVectorizer/X86/store-jumbled.ll

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -11,20 +11,21 @@ define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn
1111
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
1212
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
1313
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
14+
; CHECK-NEXT: [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
1415
; CHECK-NEXT: [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
1516
; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
1617
; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
1718
; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
1819
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
1920
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
20-
; CHECK-NEXT: [[TMP5:%.*]] = mul <4 x i32> [[TMP2]], [[TMP4]]
21+
; CHECK-NEXT: [[REORDER_SHUFFLE1:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
22+
; CHECK-NEXT: [[TMP5:%.*]] = mul <4 x i32> [[REORDER_SHUFFLE]], [[REORDER_SHUFFLE1]]
2123
; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
2224
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
2325
; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
2426
; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
25-
; CHECK-NEXT: [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
2627
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
27-
; CHECK-NEXT: store <4 x i32> [[REORDER_SHUFFLE]], <4 x i32>* [[TMP6]], align 4
28+
; CHECK-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
2829
; CHECK-NEXT: ret i32 undef
2930
;
3031
%in.addr = getelementptr inbounds i32, i32* %in, i64 0

llvm/test/Transforms/SLPVectorizer/X86/stores_vectorize.ll

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -92,14 +92,15 @@ define void @store_reverse(i64* %p3) {
9292
; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 3
9393
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i64* [[P3]] to <4 x i64>*
9494
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 8
95+
; CHECK-NEXT: [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
9596
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 11
9697
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64* [[ARRAYIDX1]] to <4 x i64>*
9798
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* [[TMP2]], align 8
98-
; CHECK-NEXT: [[TMP4:%.*]] = shl <4 x i64> [[TMP1]], [[TMP3]]
99+
; CHECK-NEXT: [[REORDER_SHUFFLE1:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
100+
; CHECK-NEXT: [[TMP4:%.*]] = shl <4 x i64> [[REORDER_SHUFFLE]], [[REORDER_SHUFFLE1]]
99101
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 4
100-
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
101-
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64* [[ARRAYIDX14]] to <4 x i64>*
102-
; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* [[TMP6]], align 8
102+
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64* [[ARRAYIDX14]] to <4 x i64>*
103+
; CHECK-NEXT: store <4 x i64> [[TMP4]], <4 x i64>* [[TMP5]], align 8
103104
; CHECK-NEXT: ret void
104105
;
105106
entry:

0 commit comments

Comments (0)