; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata)
declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata)
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)
; Strict (constrained) v16f16 add with dynamic rounding and strict exception
; semantics must still lower to a single 256-bit vaddph (no scalarization).
define <16 x half> @f2(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddph %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %a, <16 x half> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half> %ret
}
; Strict (constrained) v16f16 subtract lowers to a single vsubph;
; operand order must be preserved (%a - %b => vsubph %ymm1, %ymm0).
define <16 x half> @f4(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubph %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %a, <16 x half> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half> %ret
}
; Strict (constrained) v16f16 multiply lowers to a single vmulph.
define <16 x half> @f6(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f6:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulph %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %a, <16 x half> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half> %ret
}
; Strict (constrained) v16f16 divide lowers to a single vdivph;
; operand order must be preserved (%a / %b => vdivph %ymm1, %ymm0).
define <16 x half> @f8(<16 x half> %a, <16 x half> %b) #0 {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivph %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %a, <16 x half> %b,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half> %ret
}
; Strict (constrained) v16f16 square root lowers to a single vsqrtph.
define <16 x half> @f10(<16 x half> %a) #0 {
; CHECK-LABEL: f10:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtph %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <16 x half> @llvm.experimental.constrained.sqrt.v16f16(
<16 x half> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half > %ret
}
; Strict fpext of 4 x half to 4 x double: expected to lower to the direct
; half->double conversion vcvtph2pd (xmm source, ymm result). Widening is
; exact, so no rounding metadata is needed on the fpext intrinsic.
define <4 x double> @f11(<4 x half> %a) #0 {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtph2pd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(
<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x double> %ret
}
; Strict fptrunc of 4 x double to 4 x half: expected to lower to the direct
; double->half conversion vcvtpd2ph (ymm source, xmm result), followed by
; vzeroupper before returning since a 256-bit register was used.
define <4 x half> @f12(<4 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtpd2ph %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(
<4 x double> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <4 x half> %ret
}
; Strict (constrained) v16f16 fused multiply-add (a * b + c) lowers to a
; single vfmadd213ph (213 form: ymm0 = ymm1 * ymm0 + ymm2).
define <16 x half> @f13(<16 x half> %a, <16 x half> %b, <16 x half> %c) #0 {
; CHECK-LABEL: f13:
; CHECK: # %bb.0:
; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
; Strict fpext of 8 x half to 8 x float: expected to lower to vcvtph2psx
; (the AVX512-FP16 form, which honors strict-FP semantics) rather than the
; legacy F16C vcvtph2ps. Widening is exact, so no rounding metadata.
define <8 x float> @f14(<8 x half> %a) #0 {
; CHECK-LABEL: f14:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtph2psx %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(
<8 x half> %a,
metadata !"fpexcept.strict") #0
ret <8 x float> %ret
}
; Strict fptrunc of 8 x float to 8 x half: expected to lower to vcvtps2phx
; (AVX512-FP16 form, rounding from MXCSR) rather than the legacy F16C
; vcvtps2ph; vzeroupper follows since a 256-bit register was used.
define <8 x half> @f15(<8 x float> %a) #0 {
; CHECK-LABEL: f15:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtps2phx %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%ret = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(
<8 x float> %a,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret <8 x half> %ret
}
; Strict ceil lowers to vrndscaleph with imm8 = 10 (0b1010): round toward
; +inf (bits[1:0] = 2) with the precision (inexact) exception suppressed
; (bit 3 set), as ceil must not raise inexact under strict semantics.
define <16 x half> @fceilv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fceilv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleph $10, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.ceil.v16f16(
<16 x half> %f, metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
; Strict floor lowers to vrndscaleph with imm8 = 9 (0b1001): round toward
; -inf (bits[1:0] = 1) with the precision exception suppressed (bit 3 set).
define <16 x half> @ffloorv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ffloorv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleph $9, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.floor.v16f16(
<16 x half> %f, metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
; Strict trunc lowers to vrndscaleph with imm8 = 11 (0b1011): round toward
; zero (bits[1:0] = 3) with the precision exception suppressed (bit 3 set).
define <16 x half> @ftruncv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: ftruncv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleph $11, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(
<16 x half> %f, metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
; Strict rint lowers to vrndscaleph with imm8 = 4 (0b0100): use the current
; MXCSR rounding mode (bit 2 set) and do NOT suppress the precision
; exception (bit 3 clear) — rint is specified to raise inexact.
define <16 x half> @frintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: frintv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleph $4, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.rint.v16f16(
<16 x half> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
; Strict nearbyint lowers to vrndscaleph with imm8 = 12 (0b1100): use the
; current MXCSR rounding mode (bit 2 set) with the precision exception
; suppressed (bit 3 set) — unlike rint, nearbyint must not raise inexact.
define <16 x half> @fnearbyintv16f16(<16 x half> %f) #0 {
; CHECK-LABEL: fnearbyintv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleph $12, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(
<16 x half> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <16 x half> %res
}
attributes #0 = { strictfp }