File: sve-intrinsics-int-arith-imm-zero.ll

Package: llvm-toolchain-20 1:20.1.6-1~exp1
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
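;
; These tests check that a zeroing select (`select %pg, %x, zeroinitializer`)
; feeding a predicated shift-by-immediate intrinsic is lowered to a zeroing
; MOVPRFX pseudo followed by the immediate form of the shift, rather than a
; separately materialised select.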

;; ASR
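;; Each test uses the largest legal immediate: for a predicated ASR the shift
;; amount ranges from 1 up to the element size in bits (8, 16, 32, 64).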
define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: asr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: asr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) local_unnamed_addr #0 {
; CHECK-LABEL: asr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: asr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
  ret <vscale x 2 x i64> %res
}

;; LSL
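;; For LSL the immediate ranges from 0 to the element size minus one, so the
;; maxima here are 7, 15, 31 and 63.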
define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: lsl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #7
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 7))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: lsl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #15
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 15))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) local_unnamed_addr #0 {
; CHECK-LABEL: lsl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #31
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 31))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: lsl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #63
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 63))
  ret <vscale x 2 x i64> %res
}

;; LSR
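;; LSR shares ASR's immediate range of 1 up to the element size in bits.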
define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg) {
; CHECK-LABEL: lsr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %vsel = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %vsel, <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: lsr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %vsel = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %vsel, <vscale x 8 x i16> splat(i16 16))
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg) local_unnamed_addr #0 {
; CHECK-LABEL: lsr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %vsel = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %vsel, <vscale x 4 x i32> splat(i32 32))
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: lsr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %vsel = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %vsel, <vscale x 2 x i64> splat(i64 64))
  ret <vscale x 2 x i64> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
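
; Note: IR of this shape is what Clang typically emits for the ACLE zeroing
; intrinsics, e.g. `svasr_n_s8_z(pg, a, 8)` in C becomes a zeroing select of
; the first operand followed by a call to @llvm.aarch64.sve.asr. This is an
; illustrative mapping only; the test does not exercise Clang itself.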