@@ -15,10 +15,10 @@ define void @test_memset_zero_length(i8* %dest) {
define void @test_memset_to_store(i8* %dest) {
; CHECK-LABEL: @test_memset_to_store(
; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 2, i32 1)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 4, i32 1)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 8, i32 1)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 16, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 2, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 4, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 8, i32 1)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 16, i32 1)
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
@@ -34,9 +34,9 @@ define void @test_memset_to_store_2(i8* %dest) {
; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 2
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 2
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 4, i32 2)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 8, i32 2)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 16, i32 2)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 4, i32 2)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 8, i32 2)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 16, i32 2)
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 1, i32 1)
@@ -54,8 +54,8 @@ define void @test_memset_to_store_4(i8* %dest) {
; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 4
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 4
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 8, i32 4)
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 16, i32 4)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 8, i32 4)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 16, i32 4)
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 1, i32 1)
@@ -75,7 +75,7 @@ define void @test_memset_to_store_8(i8* %dest) {
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 8
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 [[DEST]], i8 1, i32 16, i32 8)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 8 [[DEST]], i8 1, i32 16, i32 8)
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 1, i32 1)
@@ -95,7 +95,7 @@ define void @test_memset_to_store_16(i8* %dest) {
; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 16
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 16
- ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST]], i8 1, i32 16, i32 16)
+ ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 16 [[DEST]], i8 1, i32 16, i32 16)
; CHECK-NEXT: ret void
;
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 1, i32 1)
@@ -154,10 +154,10 @@ define void @test_memmove_loadstore(i8* %dest, i8* %src) {
; CHECK-LABEL: @test_memmove_loadstore(
; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -176,9 +176,9 @@ define void @test_memmove_loadstore_2(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -201,8 +201,8 @@ define void @test_memmove_loadstore_4(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -229,7 +229,7 @@ define void @test_memmove_loadstore_8(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -256,7 +256,7 @@ define void @test_memmove_loadstore_16(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
- ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+ ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
; CHECK-NEXT: ret void
;
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
@@ -302,10 +302,10 @@ define void @test_memcpy_loadstore(i8* %dest, i8* %src) {
; CHECK-LABEL: @test_memcpy_loadstore(
; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -324,9 +324,9 @@ define void @test_memcpy_loadstore_2(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -349,8 +349,8 @@ define void @test_memcpy_loadstore_4(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -377,7 +377,7 @@ define void @test_memcpy_loadstore_8(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -404,7 +404,7 @@ define void @test_memcpy_loadstore_16(i8* %dest, i8* %src) {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
- ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+ ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
; CHECK-NEXT: ret void
;
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)