; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
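
; Make sure a chain of two llvm.minnum calls is combined into a single
; v_min3 instruction where the target supports it.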
; GCN-LABEL: {{^}}test_fmin3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
%c = load volatile float, ptr addrspace(1) %cptr, align 4
%f0 = call float @llvm.minnum.f32(float %a, float %b)
%f1 = call float @llvm.minnum.f32(float %f0, float %c)
store float %f1, ptr addrspace(1) %out, align 4
ret void
}

; Commute operand of second fmin
; GCN-LABEL: {{^}}test_fmin3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
%c = load volatile float, ptr addrspace(1) %cptr, align 4
%f0 = call float @llvm.minnum.f32(float %a, float %b)
%f1 = call float @llvm.minnum.f32(float %c, float %f0)
store float %f1, ptr addrspace(1) %out, align 4
ret void
}

; GCN-LABEL: {{^}}test_fmin3_olt_0_f16:
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
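
; SI has no f16 min instructions: the operands are extended to f32, combined
; with a single v_min3_f32, and the result is converted back to f16.
; VI has v_min_f16 but no v_min3_f16, so two scalar f16 mins are emitted.
; GFX9 can use v_min3_f16 directly.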
; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]
; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],
; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
%c = load volatile half, ptr addrspace(1) %cptr, align 2
%f0 = call half @llvm.minnum.f16(half %a, half %b)
%f1 = call half @llvm.minnum.f16(half %f0, half %c)
store half %f1, ptr addrspace(1) %out, align 2
ret void
}

; Commute operand of second fmin
; GCN-LABEL: {{^}}test_fmin3_olt_1_f16:
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[REGA]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[REGB]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], [[REGC]]
; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]], [[CVT_C]], [[CVT_A]], [[CVT_B]]
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]
; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],
; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile half, ptr addrspace(1) %aptr, align 2
%b = load volatile half, ptr addrspace(1) %bptr, align 2
%c = load volatile half, ptr addrspace(1) %cptr, align 2
%f0 = call half @llvm.minnum.f16(half %a, half %b)
%f1 = call half @llvm.minnum.f16(half %c, half %f0)
store half %f1, ptr addrspace(1) %out, align 2
ret void
}

; performMinMaxCombine() should not combine the vector patterns into min3,
; since there is no packed v_min3 instruction for f16 vectors.
; GCN-LABEL: {{^}}no_fmin3_v2f16:
; SI: v_cvt_f16_f32_e32
; SI: v_min_f32_e32
; SI-NEXT: v_min_f32_e32
; SI-NEXT: v_min3_f32
; SI-NEXT: v_min3_f32
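
; VI handles the low and high halves of each v2f16 value separately, using
; SDWA (src0_sel:WORD_1 / src1_sel:WORD_1) to access the high halves.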
; VI: s_waitcnt
; VI-NEXT: v_min_f16_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_min_f16_e32 v0, v0, v1
; VI-NEXT: v_min_f16_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_min_f16_e32 v0, v2, v0
; VI-NEXT: v_min_f16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_min_f16_e32 v0, v0, v3
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64
; GFX9: s_waitcnt
; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
; GFX9-NEXT: v_pk_min_f16 v0, v2, v0
; GFX9-NEXT: v_pk_min_f16 v0, v0, v3
; GFX9-NEXT: s_setpc_b64
define <2 x half> @no_fmin3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #2 {
entry:
%min = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
%min1 = call <2 x half> @llvm.minnum.v2f16(<2 x half> %c, <2 x half> %min)
%res = call <2 x half> @llvm.minnum.v2f16(<2 x half> %min1, <2 x half> %d)
ret <2 x half> %res
}
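
; There is no v_min3_f64 instruction, so min3 must not be formed for f64.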
; GCN-LABEL: {{^}}test_fmin3_olt_0_f64:
; GCN-NOT: v_min3
define amdgpu_kernel void @test_fmin3_olt_0_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile double, ptr addrspace(1) %aptr, align 4
%b = load volatile double, ptr addrspace(1) %bptr, align 4
%c = load volatile double, ptr addrspace(1) %cptr, align 4
%f0 = call double @llvm.minnum.f64(double %a, double %b)
%f1 = call double @llvm.minnum.f64(double %f0, double %c)
store double %f1, ptr addrspace(1) %out, align 4
ret void
}

; Commute operand of second fmin
; GCN-LABEL: {{^}}test_fmin3_olt_1_f64:
; GCN-NOT: v_min3
define amdgpu_kernel void @test_fmin3_olt_1_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%a = load volatile double, ptr addrspace(1) %aptr, align 4
%b = load volatile double, ptr addrspace(1) %bptr, align 4
%c = load volatile double, ptr addrspace(1) %cptr, align 4
%f0 = call double @llvm.minnum.f64(double %a, double %b)
%f1 = call double @llvm.minnum.f64(double %c, double %f0)
store double %f1, ptr addrspace(1) %out, align 4
ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare double @llvm.minnum.f64(double, double) #1
declare float @llvm.minnum.f32(float, float) #1
declare half @llvm.minnum.f16(half, half) #1
declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { nounwind "no-nans-fp-math"="true" }