File: ds-combine-with-dependence.ll

; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
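
; The tests below check how pairs of LDS loads are merged into ds_read2_b32 in
; the presence of a DS store that may alias them: the loads are combined and
; reordered around the store only when the memory dependences allow it.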


; There is no dependence between the store and the two loads, so we can combine
; the loads and schedule the combined load freely.

; GCN-LABEL: {{^}}ds_combine_nodep

; GCN-DAG: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:26 offset1:27
; GCN-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset0:7 offset1:8
; GCN: s_waitcnt lgkmcnt({{[0-9]+}})
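;
; Offset arithmetic: ds_read2_b32/ds_write2_b32 offsets are in dword (4-byte)
; units. The <3 x float> load at byte 24 covers dwords 6-8 and only its third
; element (byte 32, dword 8) is used; the scalar load reads dword 7 (byte 28),
; so the two merge into the ds_read2_b32 with offsets 7 and 8. The <2 x float>
; store at float index 26 becomes the ds_write2_b32 at dwords 26 and 27.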
define amdgpu_kernel void @ds_combine_nodep(ptr addrspace(1) %out, ptr addrspace(3) %inptr) {

  %addr0 = getelementptr i8, ptr addrspace(3) %inptr, i32 24
  %load0 = load <3 x float>, ptr addrspace(3) %addr0, align 4
  %v0 = extractelement <3 x float> %load0, i32 2

  %tmp1 = insertelement <2 x float> undef, float 1.0, i32 0
  %data = insertelement <2 x float> %tmp1, float 2.0, i32 1

  %tmp2 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  store <2 x float> %data, ptr addrspace(3) %tmp2, align 4

  %vaddr1 = getelementptr float, ptr addrspace(3) %inptr, i32 7
  %v1 = load float, ptr addrspace(3) %vaddr1, align 4

  %sum = fadd float %v0, %v1
  store float %sum, ptr addrspace(1) %out, align 4
  ret void
}


; The store depends on the first load, so we cannot move the first load down to combine it
; with the second load directly. However, we can move the store after the combined load.

; GCN-LABEL: {{^}}ds_combine_WAR

; GCN:      ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset0:7 offset1:27
; GCN-NEXT: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:26 offset1:27
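;
; Offset arithmetic: the first load covers bytes 100-111 and %v0 takes its third
; dword (byte 108, dword 27); the ds_write2_b32 writes dwords 26-27 (bytes
; 104-111), overlapping that range, which is the WAR dependence. The second load
; reads dword 7, so the merged ds_read2_b32 uses offsets 7 and 27 and the store
; is sunk below it.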
define amdgpu_kernel void @ds_combine_WAR(ptr addrspace(1) %out, ptr addrspace(3) %inptr) {

  %addr0 = getelementptr i8, ptr addrspace(3) %inptr, i32 100
  %load0 = load <3 x float>, ptr addrspace(3) %addr0, align 4
  %v0 = extractelement <3 x float> %load0, i32 2

  %tmp1 = insertelement <2 x float> undef, float 1.0, i32 0
  %data = insertelement <2 x float> %tmp1, float 2.0, i32 1

  %tmp2 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  store <2 x float> %data, ptr addrspace(3) %tmp2, align 4

  %vaddr1 = getelementptr float, ptr addrspace(3) %inptr, i32 7
  %v1 = load float, ptr addrspace(3) %vaddr1, align 4

  %sum = fadd float %v0, %v1
  store float %sum, ptr addrspace(1) %out, align 4
  ret void
}


; The second load depends on the store. We could combine the two loads, putting
; the combined load at the original place of the second load, but we prefer to
; leave the first load near the start of the function to hide its latency.

; GCN-LABEL: {{^}}ds_combine_RAW

; GCN:      ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:26 offset1:27
; GCN-NEXT: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:32
; GCN-NEXT: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:104
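;
; Offset arithmetic: %v0 is the third dword of the load at byte 24, i.e. byte 32
; (ds_read_b32 offset:32); the store writes dwords 26-27 (bytes 104-111) and the
; second load reads dword 26 (byte 104), so it must stay below the ds_write2_b32
; and the two loads remain separate ds_read_b32s.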
define amdgpu_kernel void @ds_combine_RAW(ptr addrspace(1) %out, ptr addrspace(3) %inptr) {

  %addr0 = getelementptr i8, ptr addrspace(3) %inptr, i32 24
  %load0 = load <3 x float>, ptr addrspace(3) %addr0, align 4
  %v0 = extractelement <3 x float> %load0, i32 2

  %tmp1 = insertelement <2 x float> undef, float 1.0, i32 0
  %data = insertelement <2 x float> %tmp1, float 2.0, i32 1

  %tmp2 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  store <2 x float> %data, ptr addrspace(3) %tmp2, align 4

  %vaddr1 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  %v1 = load float, ptr addrspace(3) %vaddr1, align 4

  %sum = fadd float %v0, %v1
  store float %sum, ptr addrspace(1) %out, align 4
  ret void
}


; The store depends on the first load, and the second load depends on the store,
; so we cannot combine the two loads.

; GCN-LABEL: {{^}}ds_combine_WAR_RAW

; GCN:      ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:108
; GCN-NEXT: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:26 offset1:27
; GCN-NEXT: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:104
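;
; Offset arithmetic: the first load covers bytes 100-111 with %v0 at byte 108
; (ds_read_b32 offset:108); the ds_write2_b32 overwrites bytes 104-111 (WAR on
; the first load), and the second load rereads byte 104 (RAW on the store), so
; the read/write/read order is fixed and no ds_read2_b32 is formed.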
define amdgpu_kernel void @ds_combine_WAR_RAW(ptr addrspace(1) %out, ptr addrspace(3) %inptr) {

  %addr0 = getelementptr i8, ptr addrspace(3) %inptr, i32 100
  %load0 = load <3 x float>, ptr addrspace(3) %addr0, align 4
  %v0 = extractelement <3 x float> %load0, i32 2

  %tmp1 = insertelement <2 x float> undef, float 1.0, i32 0
  %data = insertelement <2 x float> %tmp1, float 2.0, i32 1

  %tmp2 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  store <2 x float> %data, ptr addrspace(3) %tmp2, align 4

  %vaddr1 = getelementptr float, ptr addrspace(3) %inptr, i32 26
  %v1 = load float, ptr addrspace(3) %vaddr1, align 4

  %sum = fadd float %v0, %v1
  store float %sum, ptr addrspace(1) %out, align 4
  ret void
}