; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
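
; Check that the predicated SVE logical intrinsics (and, orr, eor, bic) each
; lower to a single merging-predicated instruction (p0/m) on the z registers.

;
; AND
;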
define <vscale x 16 x i8> @and_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: and_pred_i8:
; CHECK: and z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @and_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: and_pred_i16:
; CHECK: and z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @and_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_pred_i32:
; CHECK: and z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @and_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: and_pred_i64:
; CHECK: and z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
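
;
; ORR
;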

define <vscale x 16 x i8> @or_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: or_pred_i8:
; CHECK: orr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @or_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: or_pred_i16:
; CHECK: orr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @or_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: or_pred_i32:
; CHECK: orr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @or_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: or_pred_i64:
; CHECK: orr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
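
;
; EOR
;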

define <vscale x 16 x i8> @xor_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: xor_pred_i8:
; CHECK: eor z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @xor_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: xor_pred_i16:
; CHECK: eor z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @xor_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: xor_pred_i32:
; CHECK: eor z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @xor_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: xor_pred_i64:
; CHECK: eor z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
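
;
; BIC (a & ~b)
;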

define <vscale x 16 x i8> @bic_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: bic_pred_i8:
; CHECK: bic z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @bic_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: bic_pred_i16:
; CHECK: bic z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @bic_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: bic_pred_i32:
; CHECK: bic z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @bic_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: bic_pred_i64:
; CHECK: bic z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)