Skip to content

Commit 19e83a9

Browse files
danlark1 authored and nikic committed
[ValueTracking] Pointer is known nonnull after load/store
If the pointer was loaded/stored before the null check, the check is redundant and can be removed. For now the optimizers do not remove the nullptr check, see https://gcc.godbolt.org/z/H2r5GG. The patch allows using more nonnull constraints. Also, it found one more optimization in some PowerPC test. This is my first llvm review; I am open to any comments. Differential Revision: https://reviews.llvm.org/D71177
1 parent fc76569 commit 19e83a9

File tree

7 files changed

+53
-50
lines changed

7 files changed

+53
-50
lines changed

llvm/lib/Analysis/ValueTracking.cpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1943,6 +1943,15 @@ static bool isKnownNonNullFromDominatingCondition(const Value *V,
19431943
Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
19441944
return true;
19451945

1946+
// If the value is used as a load/store, then the pointer must be non null.
1947+
if (V == getLoadStorePointerOperand(U)) {
1948+
const Instruction *I = cast<Instruction>(U);
1949+
if (!NullPointerIsDefined(I->getFunction(),
1950+
V->getType()->getPointerAddressSpace()) &&
1951+
DT->dominates(I, CtxI))
1952+
return true;
1953+
}
1954+
19461955
// Consider only compare instructions uniquely controlling a branch
19471956
CmpInst::Predicate Pred;
19481957
if (!match(const_cast<User *>(U),

llvm/test/Analysis/ValueTracking/known-nonnull-at.ll

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -125,14 +125,12 @@ define i1 @unknownReturnTest(i8* %x) {
125125
ret i1 %null_check
126126
}
127127

128-
; TODO: Make sure that if load/store happened, the pointer is nonnull.
128+
; Make sure that if load/store happened, the pointer is nonnull.
129129

130130
define i32 @test_null_after_store(i32* %0) {
131131
; CHECK-LABEL: @test_null_after_store(
132132
; CHECK-NEXT: store i32 123, i32* [[TMP0:%.*]], align 4
133-
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
134-
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 2
135-
; CHECK-NEXT: ret i32 [[TMP3]]
133+
; CHECK-NEXT: ret i32 2
136134
;
137135
store i32 123, i32* %0, align 4
138136
%2 = icmp eq i32* %0, null
@@ -142,10 +140,7 @@ define i32 @test_null_after_store(i32* %0) {
142140

143141
define i32 @test_null_after_load(i32* %0) {
144142
; CHECK-LABEL: @test_null_after_load(
145-
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
146-
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32* [[TMP0]], null
147-
; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
148-
; CHECK-NEXT: ret i32 [[TMP4]]
143+
; CHECK-NEXT: ret i32 1
149144
;
150145
%2 = load i32, i32* %0, align 4
151146
%3 = icmp eq i32* %0, null

llvm/test/CodeGen/PowerPC/pr39815.ll

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,10 @@ entry:
2020
; CHECK: # %bb.0:
2121
; CHECK-DAG: addis [[REG1:[0-9]+]], [[REG2:[0-9]+]], [[VAR1:[a-z0-9A-Z_.]+]]@toc@ha
2222
; CHECK-DAG: ld [[REG3:[0-9]+]], [[VAR1]]@toc@l([[REG1]])
23-
; CHECK-DAG: lwz [[REG4:[0-9]+]], 0([[REG3]])
24-
; CHECK-DAG: addic [[REG5:[0-9]+]], [[REG3]], -1
25-
; CHECK-DAG: addze [[REG7:[0-9]+]], [[REG4]]
26-
; CHECK-DAG: addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
23+
; CHECK-DAG: lbz [[REG4:[0-9]+]], 0([[REG3]])
24+
; CHECK-DAG: addi [[REG7:[0-9]+]], [[REG4]]
2725
; CHECK-DAG: andi. [[REG9:[0-9]+]], [[REG7]], 5
26+
; CHECK-DAG: addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
2827
; CHECK-DAG: stb [[REG9]], [[VAR2]]@toc@l([[REG8]])
2928
; CHECK: blr
3029
}

llvm/test/Transforms/Coroutines/coro-swifterror.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ cleanup:
3333
; CHECK-NEXT: call void @print(i32 %n)
3434
; TODO: figure out a way to eliminate this
3535
; CHECK-NEXT: store i8* null, i8** %errorslot
36-
; CHECK-NEXT: call void @maybeThrow(i8** swifterror %errorslot)
36+
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %errorslot)
3737
; CHECK-NEXT: [[T1:%.*]] = load i8*, i8** %errorslot
3838
; CHECK-NEXT: call void @logError(i8* [[T1]])
3939
; CHECK-NEXT: store i8* [[T1]], i8** %errorslot
@@ -51,7 +51,7 @@ cleanup:
5151
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
5252
; CHECK-NEXT: call void @print(i32 %inc)
5353
; CHECK-NEXT: store i8* [[ERROR]], i8** %2
54-
; CHECK-NEXT: call void @maybeThrow(i8** swifterror %2)
54+
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %2)
5555
; CHECK-NEXT: [[T2:%.*]] = load i8*, i8** %2
5656
; CHECK-NEXT: call void @logError(i8* [[T2]])
5757
; CHECK-NEXT: store i8* [[T2]], i8** %2

llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll

Lines changed: 33 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@ define void @test_memset_zero_length(i8* %dest) {
1515
define void @test_memset_to_store(i8* %dest) {
1616
; CHECK-LABEL: @test_memset_to_store(
1717
; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
18-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 2, i32 1)
19-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 4, i32 1)
20-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 8, i32 1)
21-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 16, i32 1)
18+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 2, i32 1)
19+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 4, i32 1)
20+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 8, i32 1)
21+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 16, i32 1)
2222
; CHECK-NEXT: ret void
2323
;
2424
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
@@ -34,9 +34,9 @@ define void @test_memset_to_store_2(i8* %dest) {
3434
; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 2
3535
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
3636
; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 2
37-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 4, i32 2)
38-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 8, i32 2)
39-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 16, i32 2)
37+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 4, i32 2)
38+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 8, i32 2)
39+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 16, i32 2)
4040
; CHECK-NEXT: ret void
4141
;
4242
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 1, i32 1)
@@ -54,8 +54,8 @@ define void @test_memset_to_store_4(i8* %dest) {
5454
; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 4
5555
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
5656
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 4
57-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 8, i32 4)
58-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 16, i32 4)
57+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 8, i32 4)
58+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 16, i32 4)
5959
; CHECK-NEXT: ret void
6060
;
6161
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 1, i32 1)
@@ -75,7 +75,7 @@ define void @test_memset_to_store_8(i8* %dest) {
7575
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 8
7676
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
7777
; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 8
78-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 [[DEST]], i8 1, i32 16, i32 8)
78+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 8 [[DEST]], i8 1, i32 16, i32 8)
7979
; CHECK-NEXT: ret void
8080
;
8181
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 1, i32 1)
@@ -95,7 +95,7 @@ define void @test_memset_to_store_16(i8* %dest) {
9595
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 16
9696
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
9797
; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 16
98-
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST]], i8 1, i32 16, i32 16)
98+
; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 16 [[DEST]], i8 1, i32 16, i32 16)
9999
; CHECK-NEXT: ret void
100100
;
101101
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 1, i32 1)
@@ -154,10 +154,10 @@ define void @test_memmove_loadstore(i8* %dest, i8* %src) {
154154
; CHECK-LABEL: @test_memmove_loadstore(
155155
; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
156156
; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
157-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
158-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
159-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
160-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
157+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
158+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
159+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
160+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
161161
; CHECK-NEXT: ret void
162162
;
163163
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -176,9 +176,9 @@ define void @test_memmove_loadstore_2(i8* %dest, i8* %src) {
176176
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
177177
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
178178
; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
179-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
180-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
181-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
179+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
180+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
181+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
182182
; CHECK-NEXT: ret void
183183
;
184184
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -201,8 +201,8 @@ define void @test_memmove_loadstore_4(i8* %dest, i8* %src) {
201201
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
202202
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
203203
; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
204-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
205-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
204+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
205+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
206206
; CHECK-NEXT: ret void
207207
;
208208
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -229,7 +229,7 @@ define void @test_memmove_loadstore_8(i8* %dest, i8* %src) {
229229
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
230230
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
231231
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
232-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
232+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
233233
; CHECK-NEXT: ret void
234234
;
235235
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -256,7 +256,7 @@ define void @test_memmove_loadstore_16(i8* %dest, i8* %src) {
256256
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
257257
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
258258
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
259-
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
259+
; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
260260
; CHECK-NEXT: ret void
261261
;
262262
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
@@ -302,10 +302,10 @@ define void @test_memcpy_loadstore(i8* %dest, i8* %src) {
302302
; CHECK-LABEL: @test_memcpy_loadstore(
303303
; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
304304
; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
305-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
306-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
307-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
308-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
305+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
306+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
307+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
308+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
309309
; CHECK-NEXT: ret void
310310
;
311311
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -324,9 +324,9 @@ define void @test_memcpy_loadstore_2(i8* %dest, i8* %src) {
324324
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
325325
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
326326
; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
327-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
328-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
329-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
327+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
328+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
329+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
330330
; CHECK-NEXT: ret void
331331
;
332332
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -349,8 +349,8 @@ define void @test_memcpy_loadstore_4(i8* %dest, i8* %src) {
349349
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
350350
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
351351
; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
352-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
353-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
352+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
353+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
354354
; CHECK-NEXT: ret void
355355
;
356356
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -377,7 +377,7 @@ define void @test_memcpy_loadstore_8(i8* %dest, i8* %src) {
377377
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
378378
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
379379
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
380-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
380+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
381381
; CHECK-NEXT: ret void
382382
;
383383
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -404,7 +404,7 @@ define void @test_memcpy_loadstore_16(i8* %dest, i8* %src) {
404404
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
405405
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
406406
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
407-
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
407+
; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
408408
; CHECK-NEXT: ret void
409409
;
410410
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)

llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -474,13 +474,13 @@ define i32 @test_extra_uses_non_inbounds(i1 %cond, i1 %cond2) {
474474
; ALL-NEXT: [[PTR1:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
475475
; ALL-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
476476
; ALL-NEXT: [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
477-
; ALL-NEXT: call void @foo.i32(i32* [[PTR1_TYPED]])
477+
; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
478478
; ALL-NEXT: br label [[EXIT:%.*]]
479479
; ALL: bb2:
480480
; ALL-NEXT: [[PTR2:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
481481
; ALL-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
482482
; ALL-NEXT: [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
483-
; ALL-NEXT: call void @foo.i32(i32* [[PTR2_TYPED]])
483+
; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
484484
; ALL-NEXT: br label [[EXIT]]
485485
; ALL: exit:
486486
; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]

llvm/test/Transforms/InstCombine/sink-alloca.ll

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,6 @@ ret: ; preds = %sinktarget, %nonent
4747
; CHECK: %p = call i32* @use_and_return(i32* nonnull %argmem)
4848
; CHECK: store i32 13, i32* %p
4949
; CHECK: call void @llvm.stackrestore(i8* %sp)
50-
; CHECK: %0 = call i32* @use_and_return(i32* %p)
50+
; CHECK: %0 = call i32* @use_and_return(i32* nonnull %p)
5151

5252
attributes #0 = { nounwind }

0 commit comments

Comments
 (0)