// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple \
// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
// RUN: -o - | FileCheck %s --check-prefixes=CHECK \
// RUN: -DFNATTRS="hidden noundef nofpclass(nan inf)" -DTARGET=dx
// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple \
// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
// RUN: -o - | FileCheck %s --check-prefixes=CHECK \
// RUN: -DFNATTRS="hidden spir_func noundef nofpclass(nan inf)" -DTARGET=spv
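
// Verify that step() overloads taking double, int, uint, int64_t, and
// uint64_t arguments (scalar and 2/3/4-element vectors) are accepted and
// lowered to the float-typed llvm.dx.step / llvm.spv.step intrinsics.

// double overloads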
// CHECK: define [[FNATTRS]] float @
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].step.f32(float
// CHECK: ret float
float test_step_double(double p0, double p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <2 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].step.v2f32(
// CHECK: ret <2 x float> %hlsl.step
float2 test_step_double2(double2 p0, double2 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <3 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.[[TARGET]].step.v3f32(
// CHECK: ret <3 x float> %hlsl.step
float3 test_step_double3(double3 p0, double3 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <4 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].step.v4f32(
// CHECK: ret <4 x float> %hlsl.step
float4 test_step_double4(double4 p0, double4 p1)
{
  return step(p0, p1);
}
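
// int overloads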
// CHECK: define [[FNATTRS]] float @
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].step.f32(float
// CHECK: ret float
float test_step_int(int p0, int p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <2 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].step.v2f32(
// CHECK: ret <2 x float> %hlsl.step
float2 test_step_int2(int2 p0, int2 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <3 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.[[TARGET]].step.v3f32(
// CHECK: ret <3 x float> %hlsl.step
float3 test_step_int3(int3 p0, int3 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <4 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].step.v4f32(
// CHECK: ret <4 x float> %hlsl.step
float4 test_step_int4(int4 p0, int4 p1)
{
  return step(p0, p1);
}
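
// uint overloads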
// CHECK: define [[FNATTRS]] float @
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].step.f32(float
// CHECK: ret float
float test_step_uint(uint p0, uint p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <2 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].step.v2f32(
// CHECK: ret <2 x float> %hlsl.step
float2 test_step_uint2(uint2 p0, uint2 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <3 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.[[TARGET]].step.v3f32(
// CHECK: ret <3 x float> %hlsl.step
float3 test_step_uint3(uint3 p0, uint3 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <4 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].step.v4f32(
// CHECK: ret <4 x float> %hlsl.step
float4 test_step_uint4(uint4 p0, uint4 p1)
{
  return step(p0, p1);
}
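
// int64_t overloads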
// CHECK: define [[FNATTRS]] float @
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].step.f32(float
// CHECK: ret float
float test_step_int64_t(int64_t p0, int64_t p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <2 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].step.v2f32(
// CHECK: ret <2 x float> %hlsl.step
float2 test_step_int64_t2(int64_t2 p0, int64_t2 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <3 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.[[TARGET]].step.v3f32(
// CHECK: ret <3 x float> %hlsl.step
float3 test_step_int64_t3(int64_t3 p0, int64_t3 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <4 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].step.v4f32(
// CHECK: ret <4 x float> %hlsl.step
float4 test_step_int64_t4(int64_t4 p0, int64_t4 p1)
{
  return step(p0, p1);
}
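
// uint64_t overloads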
// CHECK: define [[FNATTRS]] float @
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].step.f32(float
// CHECK: ret float
float test_step_uint64_t(uint64_t p0, uint64_t p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <2 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].step.v2f32(
// CHECK: ret <2 x float> %hlsl.step
float2 test_step_uint64_t2(uint64_t2 p0, uint64_t2 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <3 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <3 x float> @llvm.[[TARGET]].step.v3f32(
// CHECK: ret <3 x float> %hlsl.step
float3 test_step_uint64_t3(uint64_t3 p0, uint64_t3 p1)
{
  return step(p0, p1);
}
// CHECK: define [[FNATTRS]] <4 x float> @
// CHECK: %hlsl.step = call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].step.v4f32(
// CHECK: ret <4 x float> %hlsl.step
float4 test_step_uint64_t4(uint64_t4 p0, uint64_t4 p1)
{
  return step(p0, p1);
}