File: need-fp-from-csr-vgpr-spill.ll

Package: llvm-toolchain-13 1:13.0.1-6~deb11u1 (Debian bullseye, area: main)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s

; The FP (s33) is in the CSR range and is modified by this function.
define hidden fastcc void @callee_has_fp() #1 {
; CHECK-LABEL: callee_has_fp:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s4, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_addk_i32 s32, 0x200
; CHECK-NEXT:    v_mov_b32_e32 v0, 1
; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_addk_i32 s32, 0xfe00
; CHECK-NEXT:    s_mov_b32 s33, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %alloca = alloca i32, addrspace(5)
  store volatile i32 1, i32 addrspace(5)* %alloca
  ret void
}
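
; In short, the checks above show that with "frame-pointer"="all" the incoming
; FP (s33) is saved in a scratch SGPR (s4), s33 is repointed at the incoming
; SP for the volatile store to the alloca, and the saved value is moved back
; before the return; s33 never needs to be spilled to memory here.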

; This function has no stack objects of its own, but introduces them due to
; the CSR spill. With IPRA, the FP is seen to be modified in the callee.
; There should be no redundant spills of s33 and no assertion failure.
define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s8, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_addk_i32 s32, 0x400
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 s[6:7], s[30:31]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT:    s_addk_i32 s32, 0xfc00
; CHECK-NEXT:    s_mov_b32 s33, s8
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[6:7]
bb:
  call fastcc void @callee_has_fp()
  call void asm sideeffect "; clobber csr v40", "~{v40}"()
  ret void
}
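
; In the checks above, v40 is a callee-saved VGPR clobbered by the inline asm,
; so it is spilled to and reloaded from the stack around the call; that CSR
; spill is the only stack object and is what forces this "frame-pointer"="none"
; function to set up an FP. s33 itself is only saved in s8, with no extra
; memory spill, and the return address is carried in s[6:7] across the call.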

define amdgpu_kernel void @kernel_call() {
; CHECK-LABEL: kernel_call:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s7
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    s_endpgm
bb:
  tail call fastcc void @csr_vgpr_spill_fp_callee()
  ret void
}

; Same as above, except that the call to callee_has_fp is a tail call.
define internal fastcc void @csr_vgpr_spill_fp_tailcall_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_tailcall_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; CHECK-NEXT:    v_writelane_b32 v1, s33, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    v_readlane_b32 s33, v1, 0
; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
; CHECK-NEXT:    s_setpc_b64 s[4:5]
bb:
  call void asm sideeffect "; clobber csr v40", "~{v40}"()
  tail call fastcc void @callee_has_fp()
  ret void
}
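
; In the checks above there is no FP setup: the v40 spill and reload are
; addressed directly from s32, s33 is merely round-tripped through lane 0 of
; the scratch VGPR v1 (which is itself saved and restored with all lanes
; enabled via s_or_saveexec_b64), and the transfer to callee_has_fp is a tail
; call through s_setpc_b64 rather than s_swappc_b64.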

define amdgpu_kernel void @kernel_tailcall() {
; CHECK-LABEL: kernel_tailcall:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s7
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_tailcall_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_tailcall_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    s_endpgm
bb:
  tail call fastcc void @csr_vgpr_spill_fp_tailcall_callee()
  ret void
}
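
; The attributes below pair "frame-pointer"="none" (#0) with the CSR-spill
; callees and "frame-pointer"="all" (#1) with callee_has_fp, which is what
; forces callee_has_fp to set up and modify the frame pointer; noinline keeps
; the calls from being folded away.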

attributes #0 = { "frame-pointer"="none" noinline }
attributes #1 = { "frame-pointer"="all" noinline }