File: sve-split-load.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
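
; This file checks how scalable-vector loads are legalized for SVE: types
; wider than a single SVE register are split into multiple predicated loads,
; while narrower "unpacked" types are promoted into a wider element container.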

; UNPREDICATED

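; An unpacked <vscale x 4 x i16> load is promoted and performed as a single
; ld1h into the 32-bit element container.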
define <vscale x 4 x i16> @load_promote_4i16(<vscale x 4 x i16>* %a) {
; CHECK-LABEL: load_promote_4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
  ret <vscale x 4 x i16> %load
}

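; A <vscale x 16 x i16> load is twice the width of the legal nxv8i16 type,
; so it is split into two ld1h loads.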
define <vscale x 16 x i16> @load_split_16i16(<vscale x 16 x i16>* %a) {
; CHECK-LABEL: load_split_16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
  ret <vscale x 16 x i16> %load
}

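; Three times the legal width: split into three ld1h loads.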
define <vscale x 24 x i16> @load_split_24i16(<vscale x 24 x i16>* %a) {
; CHECK-LABEL: load_split_24i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 24 x i16>, <vscale x 24 x i16>* %a
  ret <vscale x 24 x i16> %load
}

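; Four times the legal width: split into four ld1h loads.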
define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
; CHECK-LABEL: load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
  ret <vscale x 32 x i16> %load
}

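; Eight times the legal nxv2i64 width: split into eight ld1d loads, filling
; all eight return registers z0-z7.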
define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
; CHECK-LABEL: load_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
  ret <vscale x 16 x i64> %load
}

; MASKED

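; An unpacked <vscale x 2 x i32> masked load is promoted and performed as a
; single sign-extending ld1sw into the 64-bit element container.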
define <vscale x 2 x i32> @masked_load_promote_2i32(<vscale x 2 x i32>* %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: masked_load_promote_2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %load
}

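; The <vscale x 32 x i1> predicate already arrives split across p0 and p1,
; so the two halves of the load need no predicate unpacking.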
define <vscale x 32 x i8> @masked_load_split_32i8(<vscale x 32 x i8>* %a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %load
}

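; The load is split four ways; each incoming predicate register (p0, p1) is
; unpacked with punpklo/punpkhi to produce the four half-width predicates.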
define <vscale x 32 x i16> @masked_load_split_32i16(<vscale x 32 x i16>* %a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1h { z0.h }, p2/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p1/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>* %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %load
}

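; Split two ways, with a single punpklo/punpkhi pair widening the
; <vscale x 8 x i1> predicate.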
define <vscale x 8 x i32> @masked_load_split_8i32(<vscale x 8 x i32>* %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>* %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %load
}

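; Split four ways; the <vscale x 8 x i1> predicate is unpacked through two
; levels of punpklo/punpkhi.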
define <vscale x 8 x i64> @masked_load_split_8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x0]
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1d { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>* %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)