File: loop_exit_with_xor.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Where the mask of lanes wanting to exit the loop on this iteration is not
; obviously already masked by exec (in this case, the xor with -1 inserted by
; control flow annotation), the control flow lowering must insert an S_AND_B64
; with exec.
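;
; For illustration only (not checked by this test): a rough sketch of the
; annotated IR for the loop backedge below, assuming the usual
; llvm.amdgcn.if.break/llvm.amdgcn.loop intrinsics; %phi.broken and the other
; names are invented for the sketch. The "xor with -1" mentioned above is the
; inverted i1 exit condition produced by the annotation, which is why the
; resulting break mask is not known to be exec-masked.
;
;   %break = xor i1 %tmp27, true
;   %mask  = call i64 @llvm.amdgcn.if.break.i64(i1 %break, i64 %phi.broken)
;   %exit  = call i1 @llvm.amdgcn.loop.i64(i64 %mask)
;   br i1 %exit, label %loopexit, label %loop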

define void @needs_and(i32 %arg) {
; GCN-LABEL: needs_and:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s10, 1
; GCN-NEXT:    s_mov_b64 s[6:7], 0
; GCN-NEXT:    s_branch .LBB0_2
; GCN-NEXT:  .LBB0_1: ; %endif
; GCN-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[8:9]
; GCN-NEXT:    s_and_b64 s[4:5], exec, vcc
; GCN-NEXT:    s_or_b64 s[6:7], s[4:5], s[6:7]
; GCN-NEXT:    s_add_i32 s10, s10, 1
; GCN-NEXT:    s_andn2_b64 exec, exec, s[6:7]
; GCN-NEXT:    s_cbranch_execz .LBB0_4
; GCN-NEXT:  .LBB0_2: ; %loop
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    v_cmp_gt_u32_e64 s[4:5], s10, v0
; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s10, v0
; GCN-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
; GCN-NEXT:    s_cbranch_execz .LBB0_1
; GCN-NEXT:  ; %bb.3: ; %then
; GCN-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], s4
; GCN-NEXT:    s_branch .LBB0_1
; GCN-NEXT:  .LBB0_4: ; %loopexit
; GCN-NEXT:    s_or_b64 exec, exec, s[6:7]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
entry:
  br label %loop

loop:
  %tmp23phi = phi i32 [ %tmp23, %endif ], [ 0, %entry ]
  %tmp23 = add nuw i32 %tmp23phi, 1
  %tmp27 = icmp ult i32 %arg, %tmp23
  br i1 %tmp27, label %then, label %endif

then:                                             ; preds = %loop
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float undef, ptr addrspace(8) undef, i32 0, i32 undef, i32 0)
  br label %endif

endif:                                            ; preds = %then, %loop
  br i1 %tmp27, label %loop, label %loopexit

loopexit:
  ret void
}

; Where the mask of lanes wanting to exit the loop on this iteration is
; obviously already masked by exec (a V_CMP), the control flow lowering can
; omit the S_AND_B64 and avoid an unnecessary instruction.
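;
; For comparison, a sketch of the two break-mask accumulation sequences seen
; in the checks of this file (shown here only as commentary, not checked):
;
;   ; break condition of unknown provenance: mask it with exec first
;   s_and_b64 s[4:5], exec, vcc
;   s_or_b64  s[6:7], s[4:5], s[6:7]
;
;   ; break condition straight from a V_CMP under the current exec: use as-is
;   s_or_b64  s[4:5], vcc, s[4:5]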

define void @doesnt_need_and(i32 %arg) {
; GCN-LABEL: doesnt_need_and:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s6, 0
; GCN-NEXT:    s_mov_b64 s[4:5], 0
; GCN-NEXT:  .LBB1_1: ; %loop
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_add_i32 s6, s6, 1
; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], s4
; GCN-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_cbranch_execnz .LBB1_1
; GCN-NEXT:  ; %bb.2: ; %loopexit
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
entry:
  br label %loop

loop:
  %tmp23phi = phi i32 [ %tmp23, %loop ], [ 0, %entry ]
  %tmp23 = add nuw i32 %tmp23phi, 1
  %tmp27 = icmp ult i32 %arg, %tmp23
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float undef, ptr addrspace(8) undef, i32 0, i32 undef, i32 0)
  br i1 %tmp27, label %loop, label %loopexit

loopexit:
  ret void
}

; Another case where the mask of lanes wanting to exit the loop is not masked
; by exec, because the break condition is an i1 function argument.
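;
; Sketch only (names invented, nothing here is checked): the inverted i1
; argument reaches llvm.amdgcn.if.break as something like
;
;   %break = xor i1 %breakcond, true
;   %mask  = call i64 @llvm.amdgcn.if.break.i64(i1 %break, i64 %phi.broken)
;
; which corresponds to the "s_xor_b64 s[4:5], vcc, -1" in the checks below, a
; value not masked by exec, so an S_AND_B64 with exec is required.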

define void @break_cond_is_arg(i32 %arg, i1 %breakcond) {
; GCN-LABEL: break_cond_is_arg:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
; GCN-NEXT:    s_xor_b64 s[4:5], vcc, -1
; GCN-NEXT:    s_mov_b32 s10, 1
; GCN-NEXT:    s_mov_b64 s[6:7], 0
; GCN-NEXT:    s_branch .LBB2_2
; GCN-NEXT:  .LBB2_1: ; %endif
; GCN-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[8:9]
; GCN-NEXT:    s_and_b64 s[8:9], exec, s[4:5]
; GCN-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
; GCN-NEXT:    s_add_i32 s10, s10, 1
; GCN-NEXT:    s_andn2_b64 exec, exec, s[6:7]
; GCN-NEXT:    s_cbranch_execz .LBB2_4
; GCN-NEXT:  .LBB2_2: ; %loop
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, s10, v0
; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
; GCN-NEXT:    s_cbranch_execz .LBB2_1
; GCN-NEXT:  ; %bb.3: ; %then
; GCN-NEXT:    ; in Loop: Header=BB2_2 Depth=1
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], s4
; GCN-NEXT:    s_branch .LBB2_1
; GCN-NEXT:  .LBB2_4: ; %loopexit
; GCN-NEXT:    s_or_b64 exec, exec, s[6:7]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
entry:
  br label %loop

loop:
  %tmp23phi = phi i32 [ %tmp23, %endif ], [ 0, %entry ]
  %tmp23 = add nuw i32 %tmp23phi, 1
  %tmp27 = icmp ult i32 %arg, %tmp23
  br i1 %tmp27, label %then, label %endif

then:                                             ; preds = %loop
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float undef, ptr addrspace(8) undef, i32 0, i32 undef, i32 0)
  br label %endif

endif:                                            ; preds = %then, %loop
  br i1 %breakcond, label %loop, label %loopexit

loopexit:
  ret void
}

declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32 immarg) #0

attributes #0 = { nounwind writeonly }