; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX
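
; Check that constrained (strictfp) vector rounding intrinsics lower to a
; single SSE4.1 roundps/roundpd (AVX vroundps/vroundpd) instruction. In the
; round immediate, bits 1:0 select the rounding mode (00 nearest-even, 01
; toward -inf, 10 toward +inf, 11 toward zero), bit 2 selects the current
; MXCSR rounding mode instead of bits 1:0, and bit 3 suppresses the precision
; (inexact) exception.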
declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
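
; ceil: immediate $10 = round toward +inf, inexact exception suppressed.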
define <4 x float> @fceilv4f32(<4 x float> %f) #0 {
; SSE41-LABEL: fceilv4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: fceilv4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $10, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
<4 x float> %f, metadata !"fpexcept.strict") #0
ret <4 x float> %res
}

define <2 x double> @fceilv2f64(<2 x double> %f) #0 {
; SSE41-LABEL: fceilv2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: fceilv2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $10, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> %f, metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
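
; floor: immediate $9 = round toward -inf, inexact exception suppressed.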
define <4 x float> @ffloorv4f32(<4 x float> %f) #0 {
; SSE41-LABEL: ffloorv4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: ffloorv4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $9, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
<4 x float> %f, metadata !"fpexcept.strict") #0
ret <4 x float> %res
}

define <2 x double> @ffloorv2f64(<2 x double> %f) #0 {
; SSE41-LABEL: ffloorv2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: ffloorv2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $9, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> %f, metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
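
; trunc: immediate $11 = round toward zero, inexact exception suppressed.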
define <4 x float> @ftruncv4f32(<4 x float> %f) #0 {
; SSE41-LABEL: ftruncv4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: ftruncv4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
<4 x float> %f, metadata !"fpexcept.strict") #0
ret <4 x float> %res
}

define <2 x double> @ftruncv2f64(<2 x double> %f) #0 {
; SSE41-LABEL: ftruncv2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: ftruncv2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> %f, metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
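
; rint: immediate $4 = round using the current MXCSR rounding mode; the
; inexact exception is not suppressed, as rint may raise it.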
define <4 x float> @frintv4f32(<4 x float> %f) #0 {
; SSE41-LABEL: frintv4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: frintv4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $4, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.experimental.constrained.rint.v4f32(
<4 x float> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <4 x float> %res
}

define <2 x double> @frintv2f64(<2 x double> %f) #0 {
; SSE41-LABEL: frintv2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: frintv2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
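
; nearbyint: immediate $12 = round using the current MXCSR rounding mode with
; the inexact exception suppressed, since nearbyint must not raise inexact.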
define <4 x float> @fnearbyintv4f32(<4 x float> %f) #0 {
; SSE41-LABEL: fnearbyintv4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: fnearbyintv4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $12, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
<4 x float> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <4 x float> %res
}

define <2 x double> @fnearbyintv2f64(<2 x double> %f) #0 {
; SSE41-LABEL: fnearbyintv2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: fnearbyintv2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $12, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> %f,
metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret <2 x double> %res
}

attributes #0 = { strictfp }