File: sve-fixed-length-masked-loads.ll

; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16  -check-prefix=NO_SVE
; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
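; Each RUN line compiles the same IR with a different guaranteed minimum SVE
; vector length and verifies the output with FileCheck. VBYTES is the SVE
; register width in bytes assumed by the checks (the minimum rounded down to a
; power of two), and it feeds the [[#min(div(VBYTES,4),N)]] expressions used
; below to verify the vector-length operand of ptrue. The VBITS_GE_* prefixes
; enable the checks that only apply once the minimum width reaches 512, 1024,
; or 2048 bits.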

target triple = "aarch64-unknown-linux-gnu"

; Don't use SVE when its registers are no bigger than NEON.
; NO_SVE-NOT: ptrue

;
; Masked Loads
;
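; Each test loads two vectors, compares them to form the mask, and feeds the
; mask to llvm.masked.load. When the fixed-length type fits in an SVE register
; the load is expected to lower to a predicated ld1 whose governing predicate
; comes from an SVE compare; types no wider than a NEON register instead use a
; NEON compare whose result is moved into a predicate via cmpne.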
define <2 x half> @masked_load_v2f16(<2 x half>* %ap, <2 x half>* %bp) #0 {
; CHECK-LABEL: masked_load_v2f16:
; CHECK: ldr s[[N0:[0-9]+]], [x0]
; CHECK-NEXT: ldr s[[N1:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].h, vl4
; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].4h, v[[N0]].4h, v[[N1]].4h
; CHECK-NEXT: umov [[W0:w[0-9]+]], v[[N2]].h[0]
; CHECK-NEXT: umov [[W1:w[0-9]+]], v[[N2]].h[1]
; CHECK-NEXT: fmov s[[V0:[0-9]+]], [[W0]]
; CHECK-NEXT: mov v[[V0]].s[1], [[W1]]
; CHECK-NEXT: shl v[[V0]].2s, v[[V0]].2s, #16
; CHECK-NEXT: sshr v[[V0]].2s, v[[V0]].2s, #16
; CHECK-NEXT: movi [[D0:d[0-9]+]], #0000000000000000
; CHECK-NEXT: fmov [[W1]], s[[V0]]
; CHECK-NEXT: mov [[W0]], v[[V0]].s[1]
; CHECK-NEXT: mov [[V1:v[0-9]+]].h[0], [[W1]]
; CHECK-NEXT: mov [[V1]].h[1], [[W0]]
; CHECK-NEXT: shl v[[V0]].4h, [[V1]].4h, #15
; CHECK-NEXT: sshr v[[V0]].4h, v[[V0]].4h, #15
; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].h, [[PG0]]/z, z[[N2]].h, #0
; CHECK-NEXT: ld1h { z0.h }, [[PG1]]/z, [x0]
; CHECK-NEXT: ret
  %a = load <2 x half>, <2 x half>* %ap
  %b = load <2 x half>, <2 x half>* %bp
  %mask = fcmp oeq <2 x half> %a, %b
  %load = call <2 x half> @llvm.masked.load.v2f16(<2 x half>* %ap, i32 8, <2 x i1> %mask, <2 x half> zeroinitializer)
  ret <2 x half> %load
}

define <2 x float> @masked_load_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v2f32:
; CHECK: ldr d[[N0:[0-9]+]], [x0]
; CHECK-NEXT: ldr d[[N1:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].2s, v[[N0]].2s, v[[N1]].2s
; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, z[[N2]].s, #0
; CHECK-NEXT: ld1w { z0.s }, [[PG1]]/z, [x0]
; CHECK-NEXT: ret
  %a = load <2 x float>, <2 x float>* %ap
  %b = load <2 x float>, <2 x float>* %bp
  %mask = fcmp oeq <2 x float> %a, %b
  %load = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %ap, i32 8, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %load
}

define <4 x float> @masked_load_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v4f32:
; CHECK: ldr q[[N0:[0-9]+]], [x0]
; CHECK-NEXT: ldr q[[N1:[0-9]+]], [x1]
; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl4
; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].4s, v[[N0]].4s, v[[N1]].4s
; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, z[[N2]].s, #0
; CHECK-NEXT: ld1w { z0.s }, [[PG1]]/z, [x0]
; CHECK-NEXT: ret
  %a = load <4 x float>, <4 x float>* %ap
  %b = load <4 x float>, <4 x float>* %bp
  %mask = fcmp oeq <4 x float> %a, %b
  %load = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %ap, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer)
  ret <4 x float> %load
}

define <8 x float> @masked_load_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v8f32:
; CHECK: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
; CHECK-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; CHECK-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; CHECK-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; CHECK-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x0]
; CHECK-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
; CHECK-NEXT: ret
  %a = load <8 x float>, <8 x float>* %ap
  %b = load <8 x float>, <8 x float>* %bp
  %mask = fcmp oeq <8 x float> %a, %b
  %load = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %ap, i32 8, <8 x i1> %mask, <8 x float> zeroinitializer)
  ret <8 x float> %load
}

define <16 x float> @masked_load_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v16f32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <16 x float>, <16 x float>* %ap
  %b = load <16 x float>, <16 x float>* %bp
  %mask = fcmp oeq <16 x float> %a, %b
  %load = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %ap, i32 8, <16 x i1> %mask, <16 x float> zeroinitializer)
  ret <16 x float> %load
}

define <32 x float> @masked_load_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v32f32:
; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
; VBITS_GE_1024-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_1024-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_1024-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_1024-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_1024-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
; VBITS_GE_1024-NEXT: ret
  %a = load <32 x float>, <32 x float>* %ap
  %b = load <32 x float>, <32 x float>* %bp
  %mask = fcmp oeq <32 x float> %a, %b
  %load = call <32 x float> @llvm.masked.load.v32f32(<32 x float>* %ap, i32 8, <32 x i1> %mask, <32 x float> zeroinitializer)
  ret <32 x float> %load
}

define <64 x float> @masked_load_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 {
; CHECK-LABEL: masked_load_v64f32:
; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
; VBITS_GE_2048-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_2048-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_2048-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_2048-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_2048-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
; VBITS_GE_2048-NEXT: ret

  %a = load <64 x float>, <64 x float>* %ap
  %b = load <64 x float>, <64 x float>* %bp
  %mask = fcmp oeq <64 x float> %a, %b
  %load = call <64 x float> @llvm.masked.load.v64f32(<64 x float>* %ap, i32 8, <64 x i1> %mask, <64 x float> zeroinitializer)
  ret <64 x float> %load
}

define <64 x i8> @masked_load_v64i8(<64 x i8>* %ap, <64 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_v64i8:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl64
; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <64 x i8>, <64 x i8>* %ap
  %b = load <64 x i8>, <64 x i8>* %bp
  %mask = icmp eq <64 x i8> %a, %b
  %load = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
  ret <64 x i8> %load
}

define <32 x i16> @masked_load_v32i16(<32 x i16>* %ap, <32 x i16>* %bp) #0 {
; CHECK-LABEL: masked_load_v32i16:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG0]], [x8]
; VBITS_GE_512: ret
  %a = load <32 x i16>, <32 x i16>* %ap
  %b = load <32 x i16>, <32 x i16>* %bp
  %mask = icmp eq <32 x i16> %a, %b
  %load = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
  ret <32 x i16> %load
}

define <16 x i32> @masked_load_v16i32(<16 x i32>* %ap, <16 x i32>* %bp) #0 {
; CHECK-LABEL: masked_load_v16i32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <16 x i32>, <16 x i32>* %ap
  %b = load <16 x i32>, <16 x i32>* %bp
  %mask = icmp eq <16 x i32> %a, %b
  %load = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %ap, i32 8, <16 x i1> %mask, <16 x i32> undef)
  ret <16 x i32> %load
}

define <8 x i64> @masked_load_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 {
; CHECK-LABEL: masked_load_v8i64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i64>, <8 x i64>* %ap
  %b = load <8 x i64>, <8 x i64>* %bp
  %mask = icmp eq <8 x i64> %a, %b
  %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %ap, i32 8, <8 x i1> %mask, <8 x i64> undef)
  ret <8 x i64> %load
}

define <8 x i64> @masked_load_passthru_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 {
; CHECK-LABEL: masked_load_passthru_v8i64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d
; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i64>, <8 x i64>* %ap
  %b = load <8 x i64>, <8 x i64>* %bp
  %mask = icmp eq <8 x i64> %a, %b
  %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %ap, i32 8, <8 x i1> %mask, <8 x i64> %b)
  ret <8 x i64> %load
}

define <8 x double> @masked_load_passthru_v8f64(<8 x double>* %ap, <8 x double>* %bp) #0 {
; CHECK-LABEL: masked_load_passthru_v8f64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d
; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x double>, <8 x double>* %ap
  %b = load <8 x double>, <8 x double>* %bp
  %mask = fcmp oeq <8 x double> %a, %b
  %load = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %ap, i32 8, <8 x i1> %mask, <8 x double> %b)
  ret <8 x double> %load
}

define <32 x i16> @masked_load_sext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v32i8i16:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32
; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <32 x i8>, <32 x i8>* %ap
  %b = load <32 x i8>, <32 x i8>* %bp
  %mask = icmp eq <32 x i8> %a, %b
  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
  %ext = sext <32 x i8> %load to <32 x i16>
  ret <32 x i16> %ext
}

define <16 x i32> @masked_load_sext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v16i8i32:
; VBITS_GE_512: ldr q0, [x0]
; VBITS_GE_512-NEXT: ldr q1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl16
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
; VBITS_GE_512: ret
  %a = load <16 x i8>, <16 x i8>* %ap
  %b = load <16 x i8>, <16 x i8>* %bp
  %mask = icmp eq <16 x i8> %a, %b
  %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
  %ext = sext <16 x i8> %load to <16 x i32>
  ret <16 x i32> %ext
}

define <8 x i64> @masked_load_sext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v8i8i64:
; VBITS_GE_512: ldr d0, [x0]
; VBITS_GE_512-NEXT: ldr d1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl8
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i8>, <8 x i8>* %ap
  %b = load <8 x i8>, <8 x i8>* %bp
  %mask = icmp eq <8 x i8> %a, %b
  %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
  %ext = sext <8 x i8> %load to <8 x i64>
  ret <8 x i64> %ext
}

define <16 x i32> @masked_load_sext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v16i16i32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16
; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <16 x i16>, <16 x i16>* %ap
  %b = load <16 x i16>, <16 x i16>* %bp
  %mask = icmp eq <16 x i16> %a, %b
  %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
  %ext = sext <16 x i16> %load to <16 x i32>
  ret <16 x i32> %ext
}

define <8 x i64> @masked_load_sext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v8i16i64:
; VBITS_GE_512: ldr q0, [x0]
; VBITS_GE_512-NEXT: ldr q1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].h, vl8
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0
; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i16>, <8 x i16>* %ap
  %b = load <8 x i16>, <8 x i16>* %bp
  %mask = icmp eq <8 x i16> %a, %b
  %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
  %ext = sext <8 x i16> %load to <8 x i64>
  ret <8 x i64> %ext
}

define <8 x i64> @masked_load_sext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
; CHECK-LABEL: masked_load_sext_v8i32i64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i32>, <8 x i32>* %ap
  %b = load <8 x i32>, <8 x i32>* %bp
  %mask = icmp eq <8 x i32> %a, %b
  %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
  %ext = sext <8 x i32> %load to <8 x i64>
  ret <8 x i64> %ext
}

define <32 x i16> @masked_load_zext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v32i8i16:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32
; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <32 x i8>, <32 x i8>* %ap
  %b = load <32 x i8>, <32 x i8>* %bp
  %mask = icmp eq <32 x i8> %a, %b
  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
  %ext = zext <32 x i8> %load to <32 x i16>
  ret <32 x i16> %ext
}

define <16 x i32> @masked_load_zext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v16i8i32:
; VBITS_GE_512: ldr q0, [x0]
; VBITS_GE_512-NEXT: ldr q1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl16
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
; VBITS_GE_512: ret
  %a = load <16 x i8>, <16 x i8>* %ap
  %b = load <16 x i8>, <16 x i8>* %bp
  %mask = icmp eq <16 x i8> %a, %b
  %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
  %ext = zext <16 x i8> %load to <16 x i32>
  ret <16 x i32> %ext
}

define <8 x i64> @masked_load_zext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v8i8i64:
; VBITS_GE_512: ldr d0, [x0]
; VBITS_GE_512-NEXT: ldr d1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl8
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0
; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i8>, <8 x i8>* %ap
  %b = load <8 x i8>, <8 x i8>* %bp
  %mask = icmp eq <8 x i8> %a, %b
  %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
  %ext = zext <8 x i8> %load to <8 x i64>
  ret <8 x i64> %ext
}

define <16 x i32> @masked_load_zext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v16i16i32:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16
; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <16 x i16>, <16 x i16>* %ap
  %b = load <16 x i16>, <16 x i16>* %bp
  %mask = icmp eq <16 x i16> %a, %b
  %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
  %ext = zext <16 x i16> %load to <16 x i32>
  ret <16 x i32> %ext
}

define <8 x i64> @masked_load_zext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v8i16i64:
; VBITS_GE_512: ldr q0, [x0]
; VBITS_GE_512-NEXT: ldr q1, [x1]
; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].h, vl8
; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0
; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i16>, <8 x i16>* %ap
  %b = load <8 x i16>, <8 x i16>* %bp
  %mask = icmp eq <8 x i16> %a, %b
  %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
  %ext = zext <8 x i16> %load to <8 x i64>
  ret <8 x i64> %ext
}

define <8 x i64> @masked_load_zext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
; CHECK-LABEL: masked_load_zext_v8i32i64:
; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8]
; VBITS_GE_512-NEXT: ret
  %a = load <8 x i32>, <8 x i32>* %ap
  %b = load <8 x i32>, <8 x i32>* %bp
  %mask = icmp eq <8 x i32> %a, %b
  %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
  %ext = zext <8 x i32> %load to <8 x i64>
  ret <8 x i64> %ext
}

declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>)
declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
declare <32 x float> @llvm.masked.load.v32f32(<32 x float>*, i32, <32 x i1>, <32 x float>)
declare <64 x float> @llvm.masked.load.v64f32(<64 x float>*, i32, <64 x i1>, <64 x float>)

declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)
declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)
declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)
declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
declare <8 x i64> @llvm.masked.load.v8i64(<8 x i64>*, i32, <8 x i1>, <8 x i64>)
declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)

attributes #0 = { "target-features"="+sve" }