; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+fma -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma -O3 | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
; Strict-FP (constrained) fadd on <4 x double> must lower to a single vaddpd.
define <4 x double> @f1(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %a, <4 x double> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP (constrained) fadd on <8 x float> must lower to a single vaddps.
define <8 x float> @f2(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %a, <8 x float> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict-FP (constrained) fsub on <4 x double> must lower to a single vsubpd.
define <4 x double> @f3(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %a, <4 x double> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP (constrained) fsub on <8 x float> must lower to a single vsubps.
define <8 x float> @f4(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %a, <8 x float> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict-FP (constrained) fmul on <4 x double> must lower to a single vmulpd.
define <4 x double> @f5(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %a, <4 x double> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP (constrained) fmul on <8 x float> must lower to a single vmulps.
define <8 x float> @f6(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f6:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %a, <8 x float> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict-FP (constrained) fdiv on <4 x double> must lower to a single vdivpd.
define <4 x double> @f7(<4 x double> %a, <4 x double> %b) #0 {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %a, <4 x double> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP (constrained) fdiv on <8 x float> must lower to a single vdivps.
define <8 x float> @f8(<8 x float> %a, <8 x float> %b) #0 {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %a, <8 x float> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict-FP (constrained) sqrt on <4 x double> must lower to a single vsqrtpd.
define <4 x double> @f9(<4 x double> %a) #0 {
; CHECK-LABEL: f9:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtpd %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
<4 x double> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP (constrained) sqrt on <8 x float> must lower to a single vsqrtps.
; (Fixed: stray space in the return type "<8 x float >", now consistent with
; every other function in this file.)
define <8 x float> @f10(<8 x float> %a) #0 {
; CHECK-LABEL: f10:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtps %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.sqrt.v8f32(
<8 x float> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict-FP fpext <4 x float> -> <4 x double> must lower to vcvtps2pd.
define <4 x double> @f11(<4 x float> %a) #0 {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2pd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
<4 x float> %a,
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict-FP fptrunc <4 x double> -> <4 x float> must lower to vcvtpd2ps;
; the vzeroupper is expected since the ymm result is narrowed to xmm.
define <4 x float> @f12(<4 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtpd2ps %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
<4 x double> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Strict-FP (constrained) fma on <8 x float> must lower to a single fused
; vfmadd213ps, not a separate multiply and add.
define <8 x float> @f13(<8 x float> %a, <8 x float> %b, <8 x float> %c) #0 {
; CHECK-LABEL: f13:
; CHECK: # %bb.0:
; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP (constrained) fma on <4 x double> must lower to a single fused
; vfmadd213pd, not a separate multiply and add.
define <4 x double> @f14(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 {
; CHECK-LABEL: f14:
; CHECK: # %bb.0:
; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
; Strict-FP ceil on <8 x float> must lower to vroundps with immediate 10.
define <8 x float> @fceilv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: fceilv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundps $10, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(
<8 x float> %f, metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP ceil on <4 x double> must lower to vroundpd with immediate 10.
define <4 x double> @fceilv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: fceilv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundpd $10, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(
<4 x double> %f, metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
; Strict-FP floor on <8 x float> must lower to vroundps with immediate 9.
define <8 x float> @ffloorv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: ffloorv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundps $9, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.floor.v8f32(
<8 x float> %f, metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP floor on <4 x double> must lower to vroundpd with immediate 9.
define <4 x double> @ffloorv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: ffloorv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundpd $9, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.floor.v4f64(
<4 x double> %f, metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
; Strict-FP trunc on <8 x float> must lower to vroundps with immediate 11.
define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: ftruncv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundps $11, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
<8 x float> %f, metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP trunc on <4 x double> must lower to vroundpd with immediate 11.
define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: ftruncv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundpd $11, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
<4 x double> %f, metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
; Strict-FP rint on <8 x float> must lower to vroundps with immediate 4
; (round using the current/dynamic rounding mode).
define <8 x float> @frintv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: frintv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundps $4, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.rint.v8f32(
<8 x float> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP rint on <4 x double> must lower to vroundpd with immediate 4
; (round using the current/dynamic rounding mode).
define <4 x double> @frintv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: frintv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundpd $4, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
<4 x double> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
; Strict-FP nearbyint on <8 x float> must lower to vroundps with immediate 12
; (unlike rint at $4, nearbyint must not raise the inexact exception).
define <8 x float> @fnearbyintv8f32(<8 x float> %f) #0 {
; CHECK-LABEL: fnearbyintv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundps $12, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(
<8 x float> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <8 x float> %res
}
; Strict-FP nearbyint on <4 x double> must lower to vroundpd with immediate 12
; (unlike rint at $4, nearbyint must not raise the inexact exception).
define <4 x double> @fnearbyintv4f64(<4 x double> %f) #0 {
; CHECK-LABEL: fnearbyintv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundpd $12, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
<4 x double> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <4 x double> %res
}
attributes #0 = { strictfp }