; AMDGPU codegen test: stores of build_vector values that have multiple uses.
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}store_build_vector_multiple_uses_v4i32:
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; A <4 x i32> assembled from four volatile scalar loads is stored whole to two
; vector destinations and also broken back apart via extractelement for four
; scalar stores. The CHECK lines above expect two dwordx4 vector stores plus
; four individual dword stores for the extracted elements.
define amdgpu_kernel void @store_build_vector_multiple_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i32 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
; Four volatile loads from the same address: volatile prevents them from being
; CSE'd or merged, so four separate buffer_load_dword instructions must remain.
%elt0 = load volatile i32, i32 addrspace(1)* %in
%elt1 = load volatile i32, i32 addrspace(1)* %in
%elt2 = load volatile i32, i32 addrspace(1)* %in
%elt3 = load volatile i32, i32 addrspace(1)* %in
; Build the vector one lane at a time from the loaded scalars.
%vec0 = insertelement <4 x i32> undef, i32 %elt0, i32 0
%vec1 = insertelement <4 x i32> %vec0, i32 %elt1, i32 1
%vec2 = insertelement <4 x i32> %vec1, i32 %elt2, i32 2
%vec3 = insertelement <4 x i32> %vec2, i32 %elt3, i32 3
; Two full-vector uses of the build_vector -> two buffer_store_dwordx4.
store <4 x i32> %vec3, <4 x i32> addrspace(1)* %out0
store <4 x i32> %vec3, <4 x i32> addrspace(1)* %out1
; Re-extract each lane; these scalar uses coexist with the vector uses above.
%extract0 = extractelement <4 x i32> %vec3, i32 0
%extract1 = extractelement <4 x i32> %vec3, i32 1
%extract2 = extractelement <4 x i32> %vec3, i32 2
%extract3 = extractelement <4 x i32> %vec3, i32 3
; Volatile scalar stores to the same pointer: must stay as four separate
; buffer_store_dword instructions (volatile blocks merging into a dwordx4).
store volatile i32 %extract0, i32 addrspace(1)* %out2
store volatile i32 %extract1, i32 addrspace(1)* %out2
store volatile i32 %extract2, i32 addrspace(1)* %out2
store volatile i32 %extract3, i32 addrspace(1)* %out2
ret void
}
; GCN-LABEL: {{^}}store_build_vector_multiple_extract_uses_v4i32:
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; Variant of the test above where the extracted lanes feed scalar ALU ops
; (add/sub/xor/and) before being stored, alongside one full-vector store.
; The CHECK lines expect one buffer_store_dwordx4 plus four scalar stores.
define amdgpu_kernel void @store_build_vector_multiple_extract_uses_v4i32(<4 x i32> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i32 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
; Four volatile loads keep four distinct buffer_load_dword instructions.
%elt0 = load volatile i32, i32 addrspace(1)* %in
%elt1 = load volatile i32, i32 addrspace(1)* %in
%elt2 = load volatile i32, i32 addrspace(1)* %in
%elt3 = load volatile i32, i32 addrspace(1)* %in
; Assemble the vector lane by lane.
%vec0 = insertelement <4 x i32> undef, i32 %elt0, i32 0
%vec1 = insertelement <4 x i32> %vec0, i32 %elt1, i32 1
%vec2 = insertelement <4 x i32> %vec1, i32 %elt2, i32 2
%vec3 = insertelement <4 x i32> %vec2, i32 %elt3, i32 3
; Each lane is extracted and consumed by a distinct scalar operation, so the
; scalar values have uses independent of the vector store below.
%extract0 = extractelement <4 x i32> %vec3, i32 0
%extract1 = extractelement <4 x i32> %vec3, i32 1
%extract2 = extractelement <4 x i32> %vec3, i32 2
%extract3 = extractelement <4 x i32> %vec3, i32 3
%op0 = add i32 %extract0, 3
%op1 = sub i32 %extract1, 9
%op2 = xor i32 %extract2, 1231412
; NOTE(review): 258233412312 does not fit in i32; the IR parser truncates it
; to the low 32 bits. Presumably intentional test input — confirm before use.
%op3 = and i32 %extract3, 258233412312
; One full-vector use -> a single buffer_store_dwordx4.
store <4 x i32> %vec3, <4 x i32> addrspace(1)* %out0
; Volatile scalar stores keep the four results as separate dword stores.
store volatile i32 %op0, i32 addrspace(1)* %out2
store volatile i32 %op1, i32 addrspace(1)* %out2
store volatile i32 %op2, i32 addrspace(1)* %out2
store volatile i32 %op3, i32 addrspace(1)* %out2
ret void
}
; GCN-LABEL: {{^}}store_build_vector_multiple_uses_v4i32_bitcast_to_v2i64:
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx2
; GCN: buffer_store_dwordx2
; Variant where the <4 x i32> build_vector is bitcast to <2 x i64> before its
; uses: one full-vector store (dwordx4) plus two volatile i64 element stores,
; which the CHECK lines expect as two buffer_store_dwordx2 instructions.
define amdgpu_kernel void @store_build_vector_multiple_uses_v4i32_bitcast_to_v2i64(<2 x i64> addrspace(1)* noalias %out0,
<4 x i32> addrspace(1)* noalias %out1,
i64 addrspace(1)* noalias %out2,
i32 addrspace(1)* %in) {
; Four volatile loads keep four distinct buffer_load_dword instructions.
%elt0 = load volatile i32, i32 addrspace(1)* %in
%elt1 = load volatile i32, i32 addrspace(1)* %in
%elt2 = load volatile i32, i32 addrspace(1)* %in
%elt3 = load volatile i32, i32 addrspace(1)* %in
; Assemble the i32 vector lane by lane.
%vec0 = insertelement <4 x i32> undef, i32 %elt0, i32 0
%vec1 = insertelement <4 x i32> %vec0, i32 %elt1, i32 1
%vec2 = insertelement <4 x i32> %vec1, i32 %elt2, i32 2
%vec3 = insertelement <4 x i32> %vec2, i32 %elt3, i32 3
; Reinterpret the 128 bits as two i64 lanes; all uses go through the bitcast.
%bc.vec3 = bitcast <4 x i32> %vec3 to <2 x i64>
; Full-vector store of the bitcast value -> buffer_store_dwordx4.
store <2 x i64> %bc.vec3, <2 x i64> addrspace(1)* %out0
; Extract both 64-bit lanes and store them volatile: two separate
; buffer_store_dwordx2 instructions (volatile blocks merging).
%extract0 = extractelement <2 x i64> %bc.vec3, i32 0
%extract1 = extractelement <2 x i64> %bc.vec3, i32 1
store volatile i64 %extract0, i64 addrspace(1)* %out2
store volatile i64 %extract1, i64 addrspace(1)* %out2
ret void
}