+ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
; RUN: opt -passes=loop-vectorize -scalable-vectorization=off -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s
; NOTE: These tests aren't really target-specific, but it's convenient to target AArch64
@@ -8,26 +9,33 @@ target triple = "aarch64-linux-gnu"
; The original loop had an unconditional uniform load. Let's make sure
; we don't artificially create new predicated blocks for the load.
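;
; A minimal sketch (editorial addition, assuming the elided scalar body of this
; test has the usual shape) of the loop being vectorized; the load address is
; %src itself, invariant in the loop, so the load is uniform across lanes:
;
;   for.body:
;     %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
;     %val = load i32, ptr %src, align 4
;     %out = getelementptr inbounds i32, ptr %dst, i64 %iv
;     store i32 %val, ptr %out, align 4
;     %iv.next = add nuw nsw i64 %iv, 1
;     %done = icmp eq i64 %iv.next, %n
;     br i1 %done, label %for.end, label %for.body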
define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #0 {
- ; CHECK-LABEL: @uniform_load(
- ; CHECK: vector.ph:
- ; CHECK: [[N_MINUS_VF:%.*]] = sub i64 %n, [[VSCALE_X_VF:.*]]
- ; CHECK: [[CMP:%.*]] = icmp ugt i64 %n, [[VSCALE_X_VF]]
- ; CHECK: [[N2:%.*]] = select i1 [[CMP]], i64 [[N_MINUS_VF]], i64 0
- ; CHECK: [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
- ; CHECK: vector.body:
- ; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
- ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
- ; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr %src, align 4
- ; CHECK-NOT: load i32, ptr %src, align 4
+ ; CHECK-LABEL: define void @uniform_load(
+ ; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+ ; CHECK-NEXT: [[ENTRY:.*:]]
+ ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+ ; CHECK: [[VECTOR_PH]]:
+ ; CHECK-NEXT: [[N_MINUS_VF:%.*]] = sub i64 [[N]], 4
+ ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[N]], 4
+ ; CHECK-NEXT: [[N2:%.*]] = select i1 [[CMP]], i64 [[N_MINUS_VF]], i64 0
+ ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[N]])
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; CHECK: [[VECTOR_BODY]]:
+ ; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
- ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr %dst, i64 [[IDX]]
+ ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IDX]]
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP5]], ptr [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
- ; CHECK-NEXT: [[IDX_NEXT]] = add i64 [[IDX]], 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[IDX]], 4
; CHECK-NEXT: [[NEXT_ACTIVE_LANE_MASK]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[IDX]], i64 [[N2]])
; CHECK-NEXT: [[EXTRACT_FIRST_LANE_MASK:%.*]] = extractelement <4 x i1> [[NEXT_ACTIVE_LANE_MASK]], i32 0
; CHECK-NEXT: [[FIRST_LANE_SET:%.*]] = xor i1 [[EXTRACT_FIRST_LANE_MASK]], true
- ; CHECK-NEXT: br i1 [[FIRST_LANE_SET]], label %middle.block, label %vector.body
+ ; CHECK-NEXT: br i1 [[FIRST_LANE_SET]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+ ; CHECK: [[MIDDLE_BLOCK]]:
+ ; CHECK-NEXT: br [[FOR_END:label %.*]]
+ ; CHECK: [[SCALAR_PH]]:
+ ;
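;
; Worked example of the back-off trip count in vector.ph above (editorial;
; assumes the LangRef semantics of llvm.get.active.lane.mask, where lane i is
; set iff base + i < limit): with %n = 6 and VF = 4, [[N2]] = 6 - 4 = 2, so
; the mask computed at index 0 for the next iteration is lanemask(0, 2) =
; <1, 1, 0, 0>, exactly the lanes of lanemask(0 + 4, 6), but obtained without
; adding VF to the index and risking unsigned wrap.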
entry:
br label %for.body
@@ -49,19 +57,39 @@ for.end: ; preds = %for.body, %entry
; do need to perform conditional loads and so we end up using a gather instead.
; However, we at least ensure the mask is the overlap of the loop predicate
; and the original condition.
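;
; A minimal sketch (editorial addition; value names are illustrative) of the
; predicate overlap checked below: on <4 x i1>, the AND of the loop's lane
; mask and the loaded condition is expressed as a select against zero:
;
;   %c    = icmp ne <4 x i32> %cond.load, zeroinitializer
;   %mask = select <4 x i1> %lane.mask, <4 x i1> %c, <4 x i1> zeroinitializer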
- define void @cond_uniform_load(ptr nocapture %dst, ptr nocapture readonly %src, ptr nocapture readonly %cond, i64 %n) #0 {
- ; CHECK-LABEL: @cond_uniform_load(
- ; CHECK: vector.ph:
- ; CHECK: [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
- ; CHECK: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr %src, i64 0
+ define void @cond_uniform_load(ptr noalias nocapture %dst, ptr nocapture readonly %src, ptr nocapture readonly %cond, i64 %n) #0 {
+ ; CHECK-LABEL: define void @cond_uniform_load(
+ ; CHECK-SAME: ptr noalias captures(none) [[DST:%.*]], ptr readonly captures(none) [[SRC:%.*]], ptr readonly captures(none) [[COND:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+ ; CHECK-NEXT: [[ENTRY:.*:]]
+ ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+ ; CHECK: [[VECTOR_PH]]:
+ ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[N]], 4
+ ; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[N]], 4
+ ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 [[TMP5]], i64 0
+ ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[N]])
+ ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr [[SRC]], i64 0
; CHECK-NEXT: [[SRC_SPLAT:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer
- ; CHECK: vector.body:
- ; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
- ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
- ; CHECK: [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{%.*}}, i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; CHECK: [[VECTOR_BODY]]:
+ ; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[INDEX6]]
+ ; CHECK-NEXT: [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[COND_LOAD]], zeroinitializer
; CHECK-NEXT: [[MASK:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP4]], <4 x i1> zeroinitializer
- ; CHECK-NEXT: call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[SRC_SPLAT]], i32 4, <4 x i1> [[MASK]], <4 x i32> poison)
+ ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[SRC_SPLAT]], i32 4, <4 x i1> [[MASK]], <4 x i32> poison)
+ ; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[MASK]], <4 x i32> [[WIDE_MASKED_GATHER]], <4 x i32> zeroinitializer
+ ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDEX6]]
+ ; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[PREDPHI]], ptr [[TMP7]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+ ; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX6]], 4
+ ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX6]], i64 [[TMP3]])
+ ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
+ ; CHECK-NEXT: [[TMP9:%.*]] = xor i1 [[TMP8]], true
+ ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+ ; CHECK: [[MIDDLE_BLOCK]]:
+ ; CHECK-NEXT: br [[FOR_END:label %.*]]
+ ; CHECK: [[SCALAR_PH]]:
+ ;
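;
; Editorial note on the checks above: the gather is masked by [[MASK]] (lane
; mask AND condition), while the store is masked only by [[ACTIVE_LANE_MASK]],
; since lanes whose condition is false still store the zero chosen by the
; [[PREDPHI]] select.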
entry:
br label %for.body