; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --prefix-filecheck-ir-name _ --version 5
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces %s | FileCheck %s
; Trivial optimization of generic addressing: each cast already targets a
; specific address space and the memory access already uses it, so the IR
; is left unchanged.
define float @load_global_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define float @load_global_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(1)
; CHECK-NEXT: [[_TMP1:%.*]] = load float, ptr addrspace(1) [[_TMP0]], align 4
; CHECK-NEXT: ret float [[_TMP1]]
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
%tmp1 = load float, ptr addrspace(1) %tmp0
ret float %tmp1
}
define float @load_constant_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define float @load_constant_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(4)
; CHECK-NEXT: [[_TMP1:%.*]] = load float, ptr addrspace(4) [[_TMP0]], align 4
; CHECK-NEXT: ret float [[_TMP1]]
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(4)
%tmp1 = load float, ptr addrspace(4) %tmp0
ret float %tmp1
}
define float @load_group_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define float @load_group_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(3)
; CHECK-NEXT: [[_TMP1:%.*]] = load float, ptr addrspace(3) [[_TMP0]], align 4
; CHECK-NEXT: ret float [[_TMP1]]
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
%tmp1 = load float, ptr addrspace(3) %tmp0
ret float %tmp1
}
define float @load_private_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define float @load_private_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(5)
; CHECK-NEXT: [[_TMP1:%.*]] = load float, ptr addrspace(5) [[_TMP0]], align 4
; CHECK-NEXT: ret float [[_TMP1]]
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
%tmp1 = load float, ptr addrspace(5) %tmp0
ret float %tmp1
}
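; Trivial store case: the pointer operand of the store is already in
; addrspace(1).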
define amdgpu_kernel void @store_global_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define amdgpu_kernel void @store_global_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(1)
; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(1) [[_TMP0]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
store float 0.0, ptr addrspace(1) %tmp0
ret void
}
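; The flat kernel argument is assumed to point to global memory, so the
; pass routes it through addrspace(1) before the cast to the group address
; space.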
define amdgpu_kernel void @store_group_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define amdgpu_kernel void @store_group_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(1)
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[TMP1]] to ptr
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[TMP2]] to ptr addrspace(3)
; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(3) [[_TMP0]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
store float 0.0, ptr addrspace(3) %tmp0
ret void
}
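; Same assumed-global round trip before the cast to the private address
; space.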
define amdgpu_kernel void @store_private_from_flat(ptr %generic_scalar) #0 {
; CHECK-LABEL: define amdgpu_kernel void @store_private_from_flat(
; CHECK-SAME: ptr [[GENERIC_SCALAR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[GENERIC_SCALAR]] to ptr addrspace(1)
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[TMP1]] to ptr
; CHECK-NEXT: [[_TMP0:%.*]] = addrspacecast ptr [[TMP2]] to ptr addrspace(5)
; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(5) [[_TMP0]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
store float 0.0, ptr addrspace(5) %tmp0
ret void
}
; Optimized to global load/store.
define amdgpu_kernel void @load_store_global(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
; CHECK-LABEL: define amdgpu_kernel void @load_store_global(
; CHECK-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(1) [[INPUT]], align 4
; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(1) [[OUTPUT]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr addrspace(1) %input to ptr
%tmp1 = addrspacecast ptr addrspace(1) %output to ptr
%val = load i32, ptr %tmp0, align 4
store i32 %val, ptr %tmp1, align 4
ret void
}
; Optimized to group load/store.
define amdgpu_kernel void @load_store_group(ptr addrspace(3) nocapture %input, ptr addrspace(3) nocapture %output) #0 {
; CHECK-LABEL: define amdgpu_kernel void @load_store_group(
; CHECK-SAME: ptr addrspace(3) captures(none) [[INPUT:%.*]], ptr addrspace(3) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(3) [[INPUT]], align 4
; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(3) [[OUTPUT]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr addrspace(3) %input to ptr
%tmp1 = addrspacecast ptr addrspace(3) %output to ptr
%val = load i32, ptr %tmp0, align 4
store i32 %val, ptr %tmp1, align 4
ret void
}
; Optimized to private load/store.
define amdgpu_kernel void @load_store_private(ptr addrspace(5) nocapture %input, ptr addrspace(5) nocapture %output) #0 {
; CHECK-LABEL: define amdgpu_kernel void @load_store_private(
; CHECK-SAME: ptr addrspace(5) captures(none) [[INPUT:%.*]], ptr addrspace(5) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(5) [[INPUT]], align 4
; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(5) [[OUTPUT]], align 4
; CHECK-NEXT: ret void
;
%tmp0 = addrspacecast ptr addrspace(5) %input to ptr
%tmp1 = addrspacecast ptr addrspace(5) %output to ptr
%val = load i32, ptr %tmp0, align 4
store i32 %val, ptr %tmp1, align 4
ret void
}
; Flat load/store with no casts in the input. The flat kernel arguments are
; still assumed to point to global memory, so the accesses are rewritten to
; addrspace(1).
define amdgpu_kernel void @load_store_flat(ptr nocapture %input, ptr nocapture %output) #0 {
; CHECK-LABEL: define amdgpu_kernel void @load_store_flat(
; CHECK-SAME: ptr captures(none) [[INPUT:%.*]], ptr captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[INPUT]] to ptr addrspace(1)
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[OUTPUT]] to ptr addrspace(1)
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(1) [[TMP1]], align 4
; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(1) [[TMP2]], align 4
; CHECK-NEXT: ret void
;
%val = load i32, ptr %input, align 4
store i32 %val, ptr %output, align 4
ret void
}
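; The flat pointer is the stored value, not a memory operand, so its
; addrspacecast is preserved.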
define amdgpu_kernel void @store_addrspacecast_ptr_value(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
; CHECK-LABEL: define amdgpu_kernel void @store_addrspacecast_ptr_value(
; CHECK-SAME: ptr addrspace(1) captures(none) [[INPUT:%.*]], ptr addrspace(1) captures(none) [[OUTPUT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[CAST:%.*]] = addrspacecast ptr addrspace(1) [[INPUT]] to ptr
; CHECK-NEXT: store ptr [[CAST]], ptr addrspace(1) [[OUTPUT]], align 4
; CHECK-NEXT: ret void
;
%cast = addrspacecast ptr addrspace(1) %input to ptr
store ptr %cast, ptr addrspace(1) %output, align 4
ret void
}
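; An atomicrmw through a cast-to-flat pointer is rewritten to use the
; original addrspace(1) pointer directly.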
define i32 @atomicrmw_add_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 {
; CHECK-LABEL: define i32 @atomicrmw_add_global_to_flat(
; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4
; CHECK-NEXT: ret i32 [[RET]]
;
%cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
%ret = atomicrmw add ptr %cast, i32 %y seq_cst
ret i32 %ret
}
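; Likewise for a group (LDS) pointer.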
define i32 @atomicrmw_add_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 {
; CHECK-LABEL: define i32 @atomicrmw_add_group_to_flat(
; CHECK-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(3) [[GROUP_PTR]], i32 [[Y]] seq_cst, align 4
; CHECK-NEXT: ret i32 [[RET]]
;
%cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
%ret = atomicrmw add ptr %cast, i32 %y seq_cst
ret i32 %ret
}
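; cmpxchg pointer operands get the same rewrite; the seq_cst/monotonic
; orderings are unchanged.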
define { i32, i1 } @cmpxchg_global_to_flat(ptr addrspace(1) %global.ptr, i32 %cmp, i32 %val) #0 {
; CHECK-LABEL: define { i32, i1 } @cmpxchg_global_to_flat(
; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(1) [[GLOBAL_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4
; CHECK-NEXT: ret { i32, i1 } [[RET]]
;
%cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
%ret = cmpxchg ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
ret { i32, i1 } %ret
}
define { i32, i1 } @cmpxchg_group_to_flat(ptr addrspace(3) %group.ptr, i32 %cmp, i32 %val) #0 {
; CHECK-LABEL: define { i32, i1 } @cmpxchg_group_to_flat(
; CHECK-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]], i32 [[CMP:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[GROUP_PTR]], i32 [[CMP]], i32 [[VAL]] seq_cst monotonic, align 4
; CHECK-NEXT: ret { i32, i1 } [[RET]]
;
%cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
%ret = cmpxchg ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
ret { i32, i1 } %ret
}
; The cast feeds the compare value, not the pointer operand, so it cannot
; be rewritten.
define { ptr, i1 } @cmpxchg_group_to_flat_wrong_operand(ptr addrspace(3) %cas.ptr, ptr addrspace(3) %cmp.ptr, ptr %val) #0 {
; CHECK-LABEL: define { ptr, i1 } @cmpxchg_group_to_flat_wrong_operand(
; CHECK-SAME: ptr addrspace(3) [[CAS_PTR:%.*]], ptr addrspace(3) [[CMP_PTR:%.*]], ptr [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[CAST_CMP:%.*]] = addrspacecast ptr addrspace(3) [[CMP_PTR]] to ptr
; CHECK-NEXT: [[RET:%.*]] = cmpxchg ptr addrspace(3) [[CAS_PTR]], ptr [[CAST_CMP]], ptr [[VAL]] seq_cst monotonic, align 8
; CHECK-NEXT: ret { ptr, i1 } [[RET]]
;
%cast.cmp = addrspacecast ptr addrspace(3) %cmp.ptr to ptr
%ret = cmpxchg ptr addrspace(3) %cas.ptr, ptr %cast.cmp, ptr %val seq_cst monotonic
ret { ptr, i1 } %ret
}
; Null pointer in the local address space.
define void @local_nullptr(ptr addrspace(1) nocapture %results, ptr addrspace(3) %a) {
; CHECK-LABEL: define void @local_nullptr(
; CHECK-SAME: ptr addrspace(1) captures(none) [[RESULTS:%.*]], ptr addrspace(3) [[A:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], addrspacecast (ptr addrspace(5) null to ptr addrspace(3))
; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
; CHECK-NEXT: store i32 [[CONV]], ptr addrspace(1) [[RESULTS]], align 4
; CHECK-NEXT: ret void
;
entry:
%tobool = icmp ne ptr addrspace(3) %a, addrspacecast (ptr addrspace(5) null to ptr addrspace(3))
%conv = zext i1 %tobool to i32
store i32 %conv, ptr addrspace(1) %results, align 4
ret void
}
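; The !amdgpu.no.fine.grained.memory and !amdgpu.no.remote.memory metadata
; must survive the pointer rewrite.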
define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md(ptr addrspace(1) %global.ptr, i32 %y) #0 {
; CHECK-LABEL: define i32 @atomicrmw_add_global_to_flat_preserve_amdgpu_md(
; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RET:%.*]] = atomicrmw add ptr addrspace(1) [[GLOBAL_PTR]], i32 [[Y]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]], !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RET]]
;
%cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
%ret = atomicrmw add ptr %cast, i32 %y seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
ret i32 %ret
}
; Make sure inferring through a constant-expression GEP of null does not
; assert.
define ptr @try_infer_getelementptr_constant_null() {
; CHECK-LABEL: define ptr @try_infer_getelementptr_constant_null() {
; CHECK-NEXT: [[CE:%.*]] = getelementptr i8, ptr getelementptr inbounds (i8, ptr null, i64 8), i64 0
; CHECK-NEXT: ret ptr [[CE]]
;
%ce = getelementptr i8, ptr getelementptr inbounds (i8, ptr null, i64 8), i64 0
ret ptr %ce
}
attributes #0 = { nounwind }
!0 = !{}
;.
; CHECK: [[META0]] = !{}
;.