Commit 21d498c

[SLP] Vectorize jumbled stores.
Summary: This patch adds support for vectorizing jumbled stores. The value operands are vectorized and then shuffled into the right order before the store.

Reviewers: RKSimon, spatel, hfinkel, mkuper

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D43339
1 parent 72bc291 commit 21d498c
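
For intuition (an editor's illustration, not part of the commit), the pattern this enables looks like the following C++. The function name `jumbled` is invented; it writes four products to consecutive addresses, but in the order 1, 3, 0, 2, loosely mirroring the store-jumbled.ll test updated below:

```cpp
// Hypothetical input pattern: the four stores hit out[0..3] (consecutive
// addresses) but appear in the jumbled order 1, 3, 0, 2. With this patch
// the SLP vectorizer can vectorize the value operands and emit a single
// shuffle to restore memory order before one <4 x i32> store.
void jumbled(int *out, const int *a, const int *b) {
  out[1] = a[1] * b[1];
  out[3] = a[3] * b[3];
  out[0] = a[0] * b[0];
  out[2] = a[2] * b[2];
}
```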

File tree: 3 files changed, +98 -25 lines

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Lines changed: 91 additions & 16 deletions
@@ -2666,24 +2666,74 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     }
     case Instruction::Store: {
       // Check if the stores are consecutive or if we need to swizzle them.
-      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
-        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
+      llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
+      // Make sure all stores in the bundle are simple - we can't vectorize
+      // atomic or volatile stores.
+      SmallVector<Value *, 4> PointerOps(VL.size());
+      ValueList Operands(VL.size());
+      auto POIter = PointerOps.begin();
+      auto OIter = Operands.begin();
+      for (Value *V : VL) {
+        auto *SI = cast<StoreInst>(V);
+        if (!SI->isSimple()) {
           BS.cancelScheduling(VL, VL0);
           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                        ReuseShuffleIndicies);
-          LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
+          LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
           return;
         }
+        *POIter = SI->getPointerOperand();
+        *OIter = SI->getValueOperand();
+        ++POIter;
+        ++OIter;
+      }

-      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
-                                   ReuseShuffleIndicies);
-      LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
+      OrdersType CurrentOrder;
+      // Check the order of pointer operands.
+      if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
+        Value *Ptr0;
+        Value *PtrN;
+        if (CurrentOrder.empty()) {
+          Ptr0 = PointerOps.front();
+          PtrN = PointerOps.back();
+        } else {
+          Ptr0 = PointerOps[CurrentOrder.front()];
+          PtrN = PointerOps[CurrentOrder.back()];
+        }
+        const SCEV *Scev0 = SE->getSCEV(Ptr0);
+        const SCEV *ScevN = SE->getSCEV(PtrN);
+        const auto *Diff =
+            dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
+        uint64_t Size = DL->getTypeAllocSize(ScalarTy);
+        // Check that the sorted pointer operands are consecutive.
+        if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) {
+          if (CurrentOrder.empty()) {
+            // Original stores are consecutive and does not require reordering.
+            ++NumOpsWantToKeepOriginalOrder;
+            TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
+                                         UserTreeIdx, ReuseShuffleIndicies);
+            TE->setOperandsInOrder();
+            buildTree_rec(Operands, Depth + 1, {TE, 0});
+            LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
+          } else {
+            // Need to reorder.
+            auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
+            ++(I->getSecond());
+            TreeEntry *TE =
+                newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+                             ReuseShuffleIndicies, I->getFirst());
+            TE->setOperandsInOrder();
+            buildTree_rec(Operands, Depth + 1, {TE, 0});
+            LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
+          }
+          return;
+        }
+      }

-      ValueList Operands;
-      for (Value *V : VL)
-        Operands.push_back(cast<Instruction>(V)->getOperand(0));
-      TE->setOperandsInOrder();
-      buildTree_rec(Operands, Depth + 1, {TE, 0});
+      BS.cancelScheduling(VL, VL0);
+      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
+                   ReuseShuffleIndicies);
+      LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
       return;
     }
     case Instruction::Call: {
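
To see what the new consecutiveness check does, here is a minimal standalone sketch in plain C++ with invented names (`consecutiveOrder` is not the LLVM sortPtrAccesses/SCEV API): sort the stores by byte offset, then require that the span from smallest to largest offset equals (n - 1) * element size, exactly as the Diff comparison above does.

```cpp
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Hypothetical stand-in for sortPtrAccesses plus the Diff check above.
// Offsets are the byte offsets of each store's pointer operand; returns the
// sorted-to-original index order if the accesses cover one consecutive run,
// or an empty vector otherwise. Assumes a non-empty bundle; duplicate
// offsets are not handled in this sketch.
std::vector<unsigned> consecutiveOrder(const std::vector<int64_t> &Offsets,
                                       uint64_t EltSize) {
  std::vector<unsigned> Order(Offsets.size());
  std::iota(Order.begin(), Order.end(), 0u);
  std::sort(Order.begin(), Order.end(),
            [&](unsigned A, unsigned B) { return Offsets[A] < Offsets[B]; });
  int64_t Diff = Offsets[Order.back()] - Offsets[Order.front()];
  if (Diff != int64_t((Offsets.size() - 1) * EltSize))
    return {}; // not consecutive: the bundle is gathered instead
  return Order; // identity order means the stores are already in order
}
```

For example, consecutiveOrder({4, 12, 0, 8}, 4) yields {2, 0, 3, 1}, and the span check 12 - 0 == 3 * 4 passes, so the jumbled bundle is still vectorizable.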
@@ -3181,15 +3231,23 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
     }
     case Instruction::Store: {
       // We know that we can merge the stores. Calculate the cost.
-      MaybeAlign alignment(cast<StoreInst>(VL0)->getAlignment());
+      bool IsReorder = !E->ReorderIndices.empty();
+      auto *SI =
+          cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
+      MaybeAlign Alignment(SI->getAlignment());
       int ScalarEltCost =
-          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
+          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, Alignment, 0, VL0);
       if (NeedToShuffleReuses) {
         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
       }
       int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
-      int VecStCost =
-          TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
+      int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
+                                           VecTy, Alignment, 0, VL0);
+      if (IsReorder) {
+        // TODO: Merge this shuffle with the ReuseShuffleCost.
+        VecStCost += TTI->getShuffleCost(
+            TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
+      }
       return ReuseShuffleCost + VecStCost - ScalarStCost;
     }
     case Instruction::Call: {
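
In cost-model terms the change is simple: a reordered bundle pays for one extra single-source permute on top of the vector store, and the TODO notes this could later be folded into ReuseShuffleCost. A toy restatement with made-up unit costs (an editor's sketch, not the TTI API; `storeBundleCost` is an invented name):

```cpp
// Toy model of the cost computed above: the vector store replaces NumElts
// scalar stores, and a jumbled bundle additionally pays for one
// SK_PermuteSingleSrc-style shuffle.
int storeBundleCost(int NumElts, int ScalarStoreCost, int VectorStoreCost,
                    int ShuffleCost, bool IsReorder) {
  int ScalarStCost = NumElts * ScalarStoreCost;
  int VecStCost = VectorStoreCost + (IsReorder ? ShuffleCost : 0);
  return VecStCost - ScalarStCost; // negative => vectorization is profitable
}
```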
@@ -4051,13 +4109,22 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       return V;
     }
     case Instruction::Store: {
-      StoreInst *SI = cast<StoreInst>(VL0);
+      bool IsReorder = !E->ReorderIndices.empty();
+      auto *SI = cast<StoreInst>(
+          IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
       unsigned Alignment = SI->getAlignment();
       unsigned AS = SI->getPointerAddressSpace();

       setInsertPointAfterBundle(E);

       Value *VecValue = vectorizeTree(E->getOperand(0));
+      if (IsReorder) {
+        OrdersType Mask;
+        inversePermutation(E->ReorderIndices, Mask);
+        VecValue = Builder.CreateShuffleVector(
+            VecValue, UndefValue::get(VecValue->getType()), E->ReorderIndices,
+            "reorder_shuffle");
+      }
       Value *ScalarPtr = SI->getPointerOperand();
       Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
       StoreInst *ST = Builder.CreateStore(VecValue, VecPtr);
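
The shuffle mask comes from inverting the sort order. A self-contained sketch of what the inversion computes, assuming the usual LLVM semantics of inversePermutation (Mask[Indices[I]] = I); `invertOrder` is an invented name:

```cpp
#include <vector>

// If Indices[I] says which position scalar I landed at after sorting the
// pointers, then Mask undoes that reordering when used as a shuffle mask.
std::vector<unsigned> invertOrder(const std::vector<unsigned> &Indices) {
  std::vector<unsigned> Mask(Indices.size());
  for (unsigned I = 0, E = Indices.size(); I < E; ++I)
    Mask[Indices[I]] = I;
  return Mask;
}
```

For example, invertOrder({1, 3, 0, 2}) returns {2, 0, 3, 1}: applying one permutation and then the other puts every lane back where it started.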
@@ -5347,6 +5414,14 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                     << "\n");

   R.buildTree(Chain);
+  Optional<ArrayRef<unsigned>> Order = R.bestOrder();
+  if (Order) {
+    // TODO: reorder tree nodes without tree rebuilding.
+    SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend());
+    llvm::transform(*Order, ReorderedOps.begin(),
+                    [Chain](const unsigned Idx) { return Chain[Idx]; });
+    R.buildTree(ReorderedOps);
+  }
   if (R.isTreeTinyAndNotFullyVectorizable())
     return false;
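
The rebuild applies the preferred order to the original chain. A minimal standalone equivalent of the llvm::transform call above, with plain C++ containers standing in for the SLP types (`reorderChain` is an invented name):

```cpp
#include <vector>

// The rebuilt operand list satisfies ReorderedOps[I] == Chain[Order[I]],
// matching the lambda passed to llvm::transform above.
template <typename T>
std::vector<T> reorderChain(const std::vector<T> &Chain,
                            const std::vector<unsigned> &Order) {
  std::vector<T> ReorderedOps;
  ReorderedOps.reserve(Chain.size());
  for (unsigned Idx : Order)
    ReorderedOps.push_back(Chain[Idx]);
  return ReorderedOps;
}
```

For instance, reorderChain<int>({10, 20, 30, 40}, {1, 3, 0, 2}) yields {20, 40, 10, 30}; the tree is then rebuilt from the reordered stores.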

llvm/test/Transforms/SLPVectorizer/X86/store-jumbled.ll

Lines changed: 3 additions & 4 deletions
@@ -11,21 +11,20 @@ define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn
 ; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
-; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
 ; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
 ; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
 ; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
 ; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
-; CHECK-NEXT:    [[REORDER_SHUFFLE1:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
-; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i32> [[REORDER_SHUFFLE]], [[REORDER_SHUFFLE1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i32> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
 ; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
 ; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
-; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    store <4 x i32> [[REORDER_SHUFFLE]], <4 x i32>* [[TMP6]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
   %in.addr = getelementptr inbounds i32, i32* %in, i64 0
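
Net effect in this test: the two per-load reorder shuffles disappear, the mul now operates on the raw loaded vectors, and a single reorder_shuffle of the product is emitted immediately before the vector store, so the sequence needs one shuffle instead of two.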

llvm/test/Transforms/SLPVectorizer/X86/stores_vectorize.ll

Lines changed: 4 additions & 5 deletions
@@ -92,15 +92,14 @@ define void @store_reverse(i64* %p3) {
 ; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 3
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64* [[P3]] to <4 x i64>*
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 8
-; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 11
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64* [[ARRAYIDX1]] to <4 x i64>*
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* [[TMP2]], align 8
-; CHECK-NEXT:    [[REORDER_SHUFFLE1:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT:    [[TMP4:%.*]] = shl <4 x i64> [[REORDER_SHUFFLE]], [[REORDER_SHUFFLE1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = shl <4 x i64> [[TMP1]], [[TMP3]]
 ; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i64, i64* [[P3]], i64 4
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i64* [[ARRAYIDX14]] to <4 x i64>*
-; CHECK-NEXT:    store <4 x i64> [[TMP4]], <4 x i64>* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP4]], <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i64* [[ARRAYIDX14]] to <4 x i64>*
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* [[TMP6]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
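
Likewise here: the reverse shuffles on both loads are gone, the shl consumes the loads directly, and one reversing shuffle of the result feeds the vector store, so the reversed store chain costs a single permute instead of two.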
