File: avx512-masked_memop-16-8.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s
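; (The CHECK lines can be regenerated by running utils/update_llc_test_checks.py on this file.)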

; The Skylake-avx512 target supports masked load/store for i8 and i16 vectors.
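;
; The CHECK lines below show the expected lowering: the <N x i1> mask arrives
; widened to one byte (or word) per element in a vector register, so codegen
; shifts each lane left (vpsllw $7 for byte masks, vpsllw $15 for word masks)
; to move the i1 into the sign bit and then extracts the sign bits into a
; k-register with vpmovb2m/vpmovw2m. Loads whose passthru is undef or zero
; lower to zero-masked vmovdqu8/vmovdqu16 moves; loads with a live passthru
; lower to vpblendmb/vpblendmw blends.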

define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu8 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)

define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
; CHECK-NEXT:    retq
  %res = call <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> %val)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)

define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT:    vpmovb2m %zmm0, %k1
; CHECK-NEXT:    vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT:    retq
  %res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64 x i8> %val)
  ret <64 x i8> %res
}
declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)

define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT:    vpmovw2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)

define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)

define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT:    retq
  %res = call <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>* %addr, i32 4, <32 x i1>%mask, <32 x i16> %val)
  ret <32 x i16> %res
}
declare <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)
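
; The store tests below reuse the same mask materialization; masked stores
; lower to vmovdqu8/vmovdqu16 with a {%k1} write-mask, and the wider tests
; also check the vzeroupper emitted before returning once ymm/zmm registers
; have been used.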

define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu8 %xmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)

define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu8 %ymm1, (%rdi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)

define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT:    vpmovb2m %zmm0, %k1
; CHECK-NEXT:    vmovdqu8 %zmm1, (%rdi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)

define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT:    vpmovw2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 %xmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)

define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)

define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu16 %zmm1, (%rdi) {%k1}
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>)