; Test the MSA intrinsics that are encoded with the I5 instruction format and
; are loads or stores.
; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck %s
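;
; The ld.* tests below load a 128-bit vector from (ARG + 16) through the
; corresponding llvm.mips.ld.* intrinsic and check that the constant byte
; offset of 16 is folded into the immediate field of the emitted ld.*
; instruction.
;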
@llvm_mips_ld_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_ld_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
define void @llvm_mips_ld_b_test() nounwind {
entry:
%0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
%1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)
store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
ret void
}
declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
; CHECK: llvm_mips_ld_b_test:
; CHECK: ld.b [[R1:\$w[0-9]+]], 16(
; CHECK: st.b
; CHECK: .size llvm_mips_ld_b_test
;
@llvm_mips_ld_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_ld_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_ld_h_test() nounwind {
entry:
%0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
%1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 16)
store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
ret void
}
declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
; CHECK: llvm_mips_ld_h_test:
; CHECK: ld.h [[R1:\$w[0-9]+]], 16(
; CHECK: st.h
; CHECK: .size llvm_mips_ld_h_test
;
@llvm_mips_ld_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_ld_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_ld_w_test() nounwind {
entry:
%0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
%1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 16)
store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
ret void
}
declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
; CHECK: llvm_mips_ld_w_test:
; CHECK: ld.w [[R1:\$w[0-9]+]], 16(
; CHECK: st.w
; CHECK: .size llvm_mips_ld_w_test
;
@llvm_mips_ld_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_ld_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_ld_d_test() nounwind {
entry:
%0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
%1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 16)
store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
ret void
}
declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
; CHECK: llvm_mips_ld_d_test:
; CHECK: ld.d [[R1:\$w[0-9]+]], 16(
; CHECK: st.d
; CHECK: .size llvm_mips_ld_d_test
;
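; The st.* tests mirror the ld.* tests: the vector is read from ARG and
; stored to (RES + 16) through the corresponding llvm.mips.st.* intrinsic,
; and the offset of 16 must again appear in the immediate field of the
; emitted st.* instruction.
;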
@llvm_mips_st_b_ARG = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_st_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
define void @llvm_mips_st_b_test() nounwind {
entry:
%0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
%1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
ret void
}
declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
; CHECK: llvm_mips_st_b_test:
; CHECK: ld.b
; CHECK: st.b [[R1:\$w[0-9]+]], 16(
; CHECK: .size llvm_mips_st_b_test
;
@llvm_mips_st_h_ARG = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_st_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_st_h_test() nounwind {
entry:
%0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
%1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
ret void
}
declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
; CHECK: llvm_mips_st_h_test:
; CHECK: ld.h
; CHECK: st.h [[R1:\$w[0-9]+]], 16(
; CHECK: .size llvm_mips_st_h_test
;
@llvm_mips_st_w_ARG = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_st_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_st_w_test() nounwind {
entry:
%0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
%1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
ret void
}
declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
; CHECK: llvm_mips_st_w_test:
; CHECK: ld.w
; CHECK: st.w [[R1:\$w[0-9]+]], 16(
; CHECK: .size llvm_mips_st_w_test
;
@llvm_mips_st_d_ARG = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_st_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_st_d_test() nounwind {
entry:
%0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
%1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
ret void
}
declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
; CHECK: llvm_mips_st_d_test:
; CHECK: ld.d
; CHECK: st.d [[R1:\$w[0-9]+]], 16(
; CHECK: .size llvm_mips_st_d_test
;