; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11-TRUE16
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
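
; Check that floating-point maximum results (llvm.maximum.*) computed on the
; VALU from uniform (inreg SGPR) inputs are copied back to SGPRs with
; v_readfirstlane_b32 before being returned from these amdgpu_ps shaders.
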
define amdgpu_ps i32 @uniform_v_to_s_i32(float inreg %a, float inreg %b) {
; GFX11-LABEL: uniform_v_to_s_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_max_f32_e64 v0, s0, s1
; GFX11-NEXT: v_cmp_o_f32_e64 vcc_lo, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7fc00000, v0, vcc_lo
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: ; return to shader part epilog
%max0 = call float @llvm.maximum.f32(float %a, float %b)
%cast = bitcast float %max0 to i32
ret i32 %cast
}

define amdgpu_ps i64 @uniform_v_to_s_i64(double inreg %a, double inreg %b) {
; GFX11-LABEL: uniform_v_to_s_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_max_f64 v[0:1], s[0:1], s[2:3]
; GFX11-NEXT: v_cmp_u_f64_e64 s0, s[0:1], s[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s1, v1
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: ; return to shader part epilog
%max0 = call double @llvm.maximum.f64(double %a, double %b)
%cast = bitcast double %max0 to i64
ret i64 %cast
}

define amdgpu_ps <2 x i32> @uniform_v_to_s_2_i32(<2 x float> inreg %a, <2 x float> inreg %b) {
; GFX11-LABEL: uniform_v_to_s_2_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_max_f32_e64 v0, s0, s2
; GFX11-NEXT: v_cmp_o_f32_e64 vcc_lo, s0, s2
; GFX11-NEXT: v_max_f32_e64 v1, s1, s3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7fc00000, v0, vcc_lo
; GFX11-NEXT: v_cmp_o_f32_e64 vcc_lo, s1, s3
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v1, 0x7fc00000, v1, vcc_lo
; GFX11-NEXT: v_readfirstlane_b32 s1, v1
; GFX11-NEXT: ; return to shader part epilog
%max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
%cast = bitcast <2 x float> %max0 to <2 x i32>
ret <2 x i32> %cast
}

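; This variant loads through a uniform pointer and converts the maximum with
; fptoui/inttoptr, so the SGPR result comes from v_cvt_u32_f32 followed by
; v_readfirstlane_b32, with the high half of the 64-bit pointer set to zero.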
define amdgpu_ps ptr @uniform_v_to_s_ptr(ptr inreg %x) {
; GFX11-LABEL: uniform_v_to_s_ptr:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: flat_load_b32 v0, v[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_max_f32_e32 v1, 1.0, v0
; GFX11-NEXT: v_cmp_o_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7fc00000, v1, vcc_lo
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: ; return to shader part epilog
%val = load float, ptr %x, align 4
%max = call float @llvm.maximum.f32(float %val, float 1.0)
%int = fptoui float %max to i32
%ptr = inttoptr i32 %int to ptr
ret ptr %ptr
}

define amdgpu_ps double @uniform_v_to_s_double(double inreg %a, double inreg %b) {
; GFX11-LABEL: uniform_v_to_s_double:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_max_f64 v[0:1], s[0:1], s[2:3]
; GFX11-NEXT: v_cmp_u_f64_e64 s0, s[0:1], s[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s1, v1
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: ; return to shader part epilog
%max0 = call double @llvm.maximum.f64(double %a, double %b)
ret double %max0
}

define amdgpu_ps <2 x i16> @uniform_v_to_s_2_i16(float inreg %a, float inreg %b) {
; GFX11-LABEL: uniform_v_to_s_2_i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_max_f32_e64 v0, s0, s1
; GFX11-NEXT: v_cmp_o_f32_e64 vcc_lo, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7fc00000, v0, vcc_lo
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: ; return to shader part epilog
%max0 = call float @llvm.maximum.f32(float %a, float %b)
%cast = bitcast float %max0 to <2 x i16>
ret <2 x i16> %cast
}

define amdgpu_ps i16 @uniform_v_to_s_i16(half inreg %a, half inreg %b) {
; GFX11-TRUE16-LABEL: uniform_v_to_s_i16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: v_cmp_o_f16_e64 s2, s0, s1
; GFX11-TRUE16-NEXT: v_max_f16_e64 v0.l, s0, s1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, v0.l, s2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-TRUE16-NEXT: ; return to shader part epilog
;
; GFX11-FAKE16-LABEL: uniform_v_to_s_i16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: v_max_f16_e64 v0, s0, s1
; GFX11-FAKE16-NEXT: v_cmp_o_f16_e64 vcc_lo, s0, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x7e00, v0, vcc_lo
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-FAKE16-NEXT: ; return to shader part epilog
%max = call half @llvm.maximum.f16(half %a, half %b)
%cast = bitcast half %max to i16
ret i16 %cast
}