; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -denormal-fp-math-f32=preserve-sign -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s
; Make sure that AMDGPUCodeGenPrepare introduces mul24 intrinsics
; after SLSR, as the intrinsics would otherwise interfere with that
; pass. It's unclear whether these should be introduced before LSR or
; not; it seems to help in some cases and hurt in others.
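;
; A rough sketch of the rewrite in question (not checked by this test; the
; value names here are invented for illustration): a multiply whose operands
; are both known to fit in 24 bits, e.g.
;   %a24  = and i32 %a, 16777215
;   %b24  = and i32 %b, 16777215
;   %prod = mul i32 %a24, %b24
; is expected to become a call to the mul24 intrinsic
;   %prod = call i32 @llvm.amdgcn.mul.u24(i32 %a24, i32 %b24)
; which is then selected to v_mul_u32_u24 in the GFX9 checks below.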
define void @lsr_order_mul24_0(i32 %arg, i32 %arg2, i32 %arg6, i32 %arg13, i32 %arg16) #0 {
; GFX9-LABEL: lsr_order_mul24_0:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v5, v[0:1], off
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX9-NEXT: v_sub_u32_e32 v4, v4, v1
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write_b32 v0, v5
; GFX9-NEXT: .LBB0_1: ; %bb23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mul_u32_u24_e32 v5, v0, v2
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_sub_u32_e32 v5, v4, v5
; GFX9-NEXT: v_add_u32_e32 v5, v5, v0
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v5, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB0_1
; GFX9-NEXT: ; %bb.2: ; %.loopexit
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
bb:
%tmp22 = and i32 %arg6, 16777215
br label %bb23
.loopexit: ; preds = %bb23
ret void
bb23: ; preds = %bb23, %bb
%tmp24 = phi i32 [ %arg, %bb ], [ %tmp47, %bb23 ]
%tmp28 = and i32 %tmp24, 16777215
%tmp29 = mul i32 %tmp28, %tmp22
%tmp30 = sub i32 %tmp24, %tmp29
%tmp31 = add i32 %tmp30, %arg16
%tmp37 = icmp ult i32 %tmp31, %arg13
%tmp44 = load float, ptr addrspace(1) undef, align 4
store float %tmp44, ptr addrspace(3) undef, align 4
%tmp47 = add i32 %tmp24, %arg2
br i1 %tmp37, label %bb23, label %.loopexit
}
define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, ptr addrspace(3) nocapture %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, ptr addrspace(1) nocapture readonly %arg10, i32 %arg11, i32 %arg12, i32 %arg13, i32 %arg14, i32 %arg15, i32 %arg16, i1 zeroext %arg17, i1 zeroext %arg18) #0 {
; GFX9-LABEL: lsr_order_mul24_1:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v5, 1, v18
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5
; GFX9-NEXT: v_cmp_lt_u32_e64 s[4:5], v0, v1
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB1_3
; GFX9-NEXT: ; %bb.1: ; %bb19
; GFX9-NEXT: v_cvt_f32_u32_e32 v7, v6
; GFX9-NEXT: v_add_u32_e32 v4, v4, v0
; GFX9-NEXT: v_and_b32_e32 v5, 0xffffff, v6
; GFX9-NEXT: v_lshl_add_u32 v6, v4, 2, v3
; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 2, v2
; GFX9-NEXT: v_add_u32_e32 v9, v17, v12
; GFX9-NEXT: s_mov_b64 s[10:11], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: ; implicit-def: $vgpr3
; GFX9-NEXT: .LBB1_2: ; %bb23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v0
; GFX9-NEXT: v_add_u32_e32 v18, v9, v0
; GFX9-NEXT: v_add_u32_e32 v12, v17, v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: v_madak_f32 v3, v3, v7, 0x3727c5ac
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT: v_mul_u32_u24_e32 v19, v3, v5
; GFX9-NEXT: v_add_u32_e32 v20, v3, v16
; GFX9-NEXT: v_sub_u32_e32 v3, v18, v19
; GFX9-NEXT: v_sub_u32_e32 v12, v12, v19
; GFX9-NEXT: v_mad_u64_u32 v[18:19], s[6:7], v20, v15, v[3:4]
; GFX9-NEXT: v_cmp_lt_u32_e64 s[4:5], v20, v13
; GFX9-NEXT: v_cmp_lt_u32_e64 s[6:7], v12, v14
; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, v18, s[4:5]
; GFX9-NEXT: v_lshlrev_b64 v[18:19], 2, v[3:4]
; GFX9-NEXT: v_add_co_u32_e64 v18, s[6:7], v10, v18
; GFX9-NEXT: v_addc_co_u32_e64 v19, s[6:7], v11, v19, s[6:7]
; GFX9-NEXT: global_load_dword v3, v[18:19], off
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v0, v1
; GFX9-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, v3, s[4:5]
; GFX9-NEXT: ds_write_b32 v6, v3
; GFX9-NEXT: v_add_u32_e32 v6, v6, v8
; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_cbranch_execnz .LBB1_2
; GFX9-NEXT: .LBB1_3: ; %Flow2
; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
bb:
%tmp = icmp ult i32 %arg, %arg1
br i1 %tmp, label %bb19, label %.loopexit
bb19: ; preds = %bb
%tmp20 = uitofp i32 %arg6 to float
%tmp21 = fdiv float 1.000000e+00, %tmp20, !fpmath !0
%tmp22 = and i32 %arg6, 16777215
br label %bb23
.loopexit: ; preds = %bb23, %bb
ret void
bb23: ; preds = %bb19, %bb23
%tmp24 = phi i32 [ %arg, %bb19 ], [ %tmp47, %bb23 ]
%tmp25 = uitofp i32 %tmp24 to float
%tmp26 = tail call float @llvm.fmuladd.f32(float %tmp25, float %tmp21, float 0x3EE4F8B580000000) #2
%tmp27 = fptoui float %tmp26 to i32
%tmp28 = and i32 %tmp27, 16777215
%tmp29 = mul i32 %tmp28, %tmp22
%tmp30 = sub i32 %tmp24, %tmp29
%tmp31 = add i32 %tmp30, %arg16
%tmp32 = add i32 %tmp27, %arg15
%tmp33 = mul i32 %tmp32, %arg14
%tmp34 = add i32 %tmp33, %arg11
%tmp35 = add i32 %tmp34, %tmp31
%tmp36 = add i32 %tmp24, %arg4
%tmp37 = icmp ult i32 %tmp31, %arg13
%tmp38 = icmp ult i32 %tmp32, %arg12
%tmp39 = and i1 %tmp38, %tmp37
%tmp40 = and i1 %tmp39, %arg17
%tmp41 = zext i32 %tmp35 to i64
%tmp42 = select i1 %tmp40, i64 %tmp41, i64 0
%tmp43 = getelementptr inbounds float, ptr addrspace(1) %arg10, i64 %tmp42
%tmp44 = load float, ptr addrspace(1) %tmp43, align 4
%tmp45 = select i1 %tmp40, float %tmp44, float 0.000000e+00
%tmp46 = getelementptr inbounds float, ptr addrspace(3) %arg3, i32 %tmp36
store float %tmp45, ptr addrspace(3) %tmp46, align 4
%tmp47 = add i32 %tmp24, %arg2
%tmp48 = icmp ult i32 %tmp47, %arg1
br i1 %tmp48, label %bb23, label %.loopexit
}
define void @slsr1_0(i32 %b.arg, i32 %s.arg) #0 {
; GFX9-LABEL: slsr1_0:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v1
; GFX9-NEXT: v_mul_u32_u24_e32 v3, v0, v1
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mad_u32_u24 v0, v0, v1, v2
; GFX9-NEXT: global_store_dword v[0:1], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: global_store_dword v[0:1], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
%b = and i32 %b.arg, 16777215
%s = and i32 %s.arg, 16777215
; CHECK-LABEL: @slsr1_0(
; foo(b * s);
%mul0 = mul i32 %b, %s
; CHECK: mul i32
; CHECK-NOT: mul i32
store volatile i32 %mul0, ptr addrspace(1) undef
; foo((b + 1) * s);
%b1 = add i32 %b, 1
%mul1 = mul i32 %b1, %s
store volatile i32 %mul1, ptr addrspace(1) undef
; foo((b + 2) * s);
%b2 = add i32 %b, 2
%mul2 = mul i32 %b2, %s
store volatile i32 %mul2, ptr addrspace(1) undef
ret void
}
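; For reference, a hedged sketch (value names invented for illustration) of
; what straight-line strength reduction is expected to do to the multiplies
; in slsr1_0 and slsr1_1 before the mul24 rewrite:
;   %mul0 = mul i32 %b, %s       ; b * s
;   %mul1 = add i32 %mul0, %s    ; (b + 1) * s  ->  mul0 + s
;   %mul2 = add i32 %mul1, %s    ; (b + 2) * s  ->  mul1 + s
; The GFX9 checks reflect this: a single v_mul_u32_u24 followed by a
; v_mad_u32_u24 and a v_add_u32 instead of three separate multiplies.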
define void @slsr1_1(i32 %b.arg, i32 %s.arg) #0 {
; GFX9-LABEL: slsr1_1:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s4, s33
; GFX9-NEXT: s_mov_b32 s33, s32
; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-NEXT: v_writelane_b32 v43, s4, 5
; GFX9-NEXT: v_writelane_b32 v43, s30, 0
; GFX9-NEXT: v_writelane_b32 v43, s31, 1
; GFX9-NEXT: s_addk_i32 s32, 0x800
; GFX9-NEXT: v_writelane_b32 v43, s34, 2
; GFX9-NEXT: v_writelane_b32 v43, s36, 3
; GFX9-NEXT: s_getpc_b64 s[4:5]
; GFX9-NEXT: s_add_u32 s4, s4, foo@gotpcrel32@lo+4
; GFX9-NEXT: s_addc_u32 s5, s5, foo@gotpcrel32@hi+12
; GFX9-NEXT: v_writelane_b32 v43, s37, 4
; GFX9-NEXT: s_load_dwordx2 s[36:37], s[4:5], 0x0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v40, v1
; GFX9-NEXT: v_mov_b32_e32 v41, v0
; GFX9-NEXT: v_mul_u32_u24_e32 v0, v41, v40
; GFX9-NEXT: s_mov_b32 s34, s15
; GFX9-NEXT: v_and_b32_e32 v42, 0xffffff, v40
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: v_mad_u32_u24 v40, v41, v40, v42
; GFX9-NEXT: s_mov_b32 s15, s34
; GFX9-NEXT: v_mov_b32_e32 v0, v40
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: v_add_u32_e32 v0, v40, v42
; GFX9-NEXT: s_mov_b32 s15, s34
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
; GFX9-NEXT: v_readlane_b32 s37, v43, 4
; GFX9-NEXT: v_readlane_b32 s36, v43, 3
; GFX9-NEXT: v_readlane_b32 s34, v43, 2
; GFX9-NEXT: v_readlane_b32 s31, v43, 1
; GFX9-NEXT: v_readlane_b32 s30, v43, 0
; GFX9-NEXT: v_readlane_b32 s4, v43, 5
; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-NEXT: s_addk_i32 s32, 0xf800
; GFX9-NEXT: s_mov_b32 s33, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
%b = and i32 %b.arg, 16777215
%s = and i32 %s.arg, 16777215
; CHECK-LABEL: @slsr1_1(
; foo(b * s);
%mul0 = mul i32 %b, %s
; CHECK: mul i32
; CHECK-NOT: mul i32
call void @foo(i32 %mul0)
; foo((b + 1) * s);
%b1 = add i32 %b, 1
%mul1 = mul i32 %b1, %s
call void @foo(i32 %mul1)
; foo((b + 2) * s);
%b2 = add i32 %b, 2
%mul2 = mul i32 %b2, %s
call void @foo(i32 %mul2)
ret void
}
declare void @foo(i32) #2
declare float @llvm.fmuladd.f32(float, float, float) #1
attributes #0 = { nounwind willreturn "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
!0 = !{float 2.500000e+00}