; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 --verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 --verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN-O0 %s
; Test whole-wave register spilling.
; In this testcase, the return address registers, PC value (SGPR30_SGPR31) and the scratch SGPR used in
; the inline asm statements should be preserved across the call. Since the test limits the VGPR numbers,
; the PC will be spilled to the only available CSR VGPR (VGPR40) as we spill CSR SGPRs including the PC
; directly to the physical VGPR lane to correctly generate the CFIs. The SGPR20 will get spilled to the
; virtual VGPR lane and that would be allocated by regalloc. Since there is no free VGPR to allocate, RA
; must spill a scratch VGPR. The writelane/readlane instructions that spill/restore SGPRs into/from VGPR
; are whole-wave operations and hence the VGPRs involved in such operations require whole-wave spilling.
define void @test() #0 {
; GCN-LABEL: test:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s16, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_xor_saveexec_b64 s[18:19], -1
; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, -1
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[18:19]
; GCN-NEXT: v_writelane_b32 v40, s16, 4
; GCN-NEXT: v_writelane_b32 v40, s28, 2
; GCN-NEXT: v_writelane_b32 v40, s29, 3
; GCN-NEXT: v_writelane_b32 v40, s30, 0
; GCN-NEXT: ; implicit-def: $vgpr39 : SGPR spill to VGPR lane
; GCN-NEXT: s_addk_i32 s32, 0x400
; GCN-NEXT: v_writelane_b32 v40, s31, 1
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ; def s16
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: v_writelane_b32 v39, s16, 0
; GCN-NEXT: s_or_saveexec_b64 s[28:29], -1
; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s33 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[28:29]
; GCN-NEXT: s_getpc_b64 s[16:17]
; GCN-NEXT: s_add_u32 s16, s16, ext_func@gotpcrel32@lo+4
; GCN-NEXT: s_addc_u32 s17, s17, ext_func@gotpcrel32@hi+12
; GCN-NEXT: s_load_dwordx2 s[16:17], s[16:17], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: s_or_saveexec_b64 s[28:29], -1
; GCN-NEXT: buffer_load_dword v39, off, s[0:3], s33 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[28:29]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readlane_b32 s4, v39, 0
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: global_store_dword v[0:1], v0, off
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readlane_b32 s31, v40, 1
; GCN-NEXT: v_readlane_b32 s30, v40, 0
; GCN-NEXT: s_mov_b32 s32, s33
; GCN-NEXT: v_readlane_b32 s4, v40, 4
; GCN-NEXT: v_readlane_b32 s28, v40, 2
; GCN-NEXT: v_readlane_b32 s29, v40, 3
; GCN-NEXT: s_xor_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, -1
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
; GCN-NEXT: s_mov_b32 s33, s4
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GCN-O0-LABEL: test:
; GCN-O0: ; %bb.0:
; GCN-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-O0-NEXT: s_mov_b32 s16, s33
; GCN-O0-NEXT: s_mov_b32 s33, s32
; GCN-O0-NEXT: s_xor_saveexec_b64 s[18:19], -1
; GCN-O0-NEXT: buffer_store_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
; GCN-O0-NEXT: s_mov_b64 exec, -1
; GCN-O0-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GCN-O0-NEXT: s_mov_b64 exec, s[18:19]
; GCN-O0-NEXT: v_writelane_b32 v40, s16, 4
; GCN-O0-NEXT: v_writelane_b32 v40, s28, 2
; GCN-O0-NEXT: v_writelane_b32 v40, s29, 3
; GCN-O0-NEXT: s_add_i32 s32, s32, 0x400
; GCN-O0-NEXT: v_writelane_b32 v40, s30, 0
; GCN-O0-NEXT: v_writelane_b32 v40, s31, 1
; GCN-O0-NEXT: ;;#ASMSTART
; GCN-O0-NEXT: ; def s16
; GCN-O0-NEXT: ;;#ASMEND
; GCN-O0-NEXT: ; implicit-def: $vgpr39 : SGPR spill to VGPR lane
; GCN-O0-NEXT: v_writelane_b32 v39, s16, 0
; GCN-O0-NEXT: s_or_saveexec_b64 s[28:29], -1
; GCN-O0-NEXT: buffer_store_dword v39, off, s[0:3], s33 ; 4-byte Folded Spill
; GCN-O0-NEXT: s_mov_b64 exec, s[28:29]
; GCN-O0-NEXT: s_getpc_b64 s[16:17]
; GCN-O0-NEXT: s_add_u32 s16, s16, ext_func@gotpcrel32@lo+4
; GCN-O0-NEXT: s_addc_u32 s17, s17, ext_func@gotpcrel32@hi+12
; GCN-O0-NEXT: s_load_dwordx2 s[16:17], s[16:17], 0x0
; GCN-O0-NEXT: s_mov_b64 s[22:23], s[2:3]
; GCN-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; GCN-O0-NEXT: s_waitcnt lgkmcnt(0)
; GCN-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-O0-NEXT: s_or_saveexec_b64 s[28:29], -1
; GCN-O0-NEXT: buffer_load_dword v39, off, s[0:3], s33 ; 4-byte Folded Reload
; GCN-O0-NEXT: s_mov_b64 exec, s[28:29]
; GCN-O0-NEXT: s_waitcnt vmcnt(0)
; GCN-O0-NEXT: v_readlane_b32 s4, v39, 0
; GCN-O0-NEXT: ; implicit-def: $sgpr6_sgpr7
; GCN-O0-NEXT: v_mov_b32_e32 v0, s6
; GCN-O0-NEXT: v_mov_b32_e32 v1, s7
; GCN-O0-NEXT: v_mov_b32_e32 v2, s4
; GCN-O0-NEXT: global_store_dword v[0:1], v2, off
; GCN-O0-NEXT: s_waitcnt vmcnt(0)
; GCN-O0-NEXT: v_readlane_b32 s31, v40, 1
; GCN-O0-NEXT: v_readlane_b32 s30, v40, 0
; GCN-O0-NEXT: s_mov_b32 s32, s33
; GCN-O0-NEXT: v_readlane_b32 s4, v40, 4
; GCN-O0-NEXT: v_readlane_b32 s28, v40, 2
; GCN-O0-NEXT: v_readlane_b32 s29, v40, 3
; GCN-O0-NEXT: s_xor_saveexec_b64 s[6:7], -1
; GCN-O0-NEXT: buffer_load_dword v39, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
; GCN-O0-NEXT: s_mov_b64 exec, -1
; GCN-O0-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; GCN-O0-NEXT: s_mov_b64 exec, s[6:7]
; GCN-O0-NEXT: s_mov_b32 s33, s4
; GCN-O0-NEXT: s_waitcnt vmcnt(0)
; GCN-O0-NEXT: s_setpc_b64 s[30:31]
; Pin a value into an SGPR via inline asm ("=s" constraint); this value must
; survive the call below, which is what forces the SGPR-to-VGPR-lane spill.
%sgpr = call i32 asm sideeffect "; def $0", "=s" () #0
; The external call clobbers all non-callee-saved registers, so %sgpr cannot
; simply stay in a volatile SGPR across it.
call void @ext_func()
; Volatile store keeps %sgpr live past the call so the spill/reload of the
; SGPR lane is observable in the checked output (readlane after swappc).
store volatile i32 %sgpr, ptr addrspace(1) poison
ret void
}
; External callee with no visible body: the call site in @test must assume
; full call-clobber semantics, forcing the spills this test checks for.
declare void @ext_func();
; Tight register budget (41 VGPRs / 34 SGPRs) so, per the header comment,
; v40 is the only available CSR VGPR and regalloc has to spill a scratch VGPR.
attributes #0 = { nounwind "amdgpu-num-vgpr"="41" "amdgpu-num-sgpr"="34"}