; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64 | FileCheck %s
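; Check that variable shifts and rotates (by a register amount) and integer
; division are selected as the corresponding AArch64 two-register instructions
; for both i32 and i64 operands.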
@var32_0 = global i32 0
@var32_1 = global i32 0
@var64_0 = global i64 0
@var64_1 = global i64 0

define void @rorv_i64() {
; CHECK-LABEL: rorv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
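; (x >> n) | (x << (64 - n)) below is a rotate right by a variable amount and
; should be selected as a single ror.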
%val2_tmp = sub i64 64, %val1_tmp
%val3_tmp = shl i64 %val0_tmp, %val2_tmp
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
%val5_tmp = or i64 %val3_tmp, %val4_tmp
; CHECK: {{ror|rorv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val5_tmp, i64* @var64_0
ret void
}

define void @asrv_i64() {
; CHECK-LABEL: asrv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
%val4_tmp = ashr i64 %val0_tmp, %val1_tmp
; CHECK: {{asr|asrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_1
ret void
}

define void @lsrv_i64() {
; CHECK-LABEL: lsrv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
; CHECK: {{lsr|lsrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_0
ret void
}

define void @lslv_i64() {
; CHECK-LABEL: lslv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
%val4_tmp = shl i64 %val0_tmp, %val1_tmp
; CHECK: {{lsl|lslv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_1
ret void
}

define void @udiv_i64() {
; CHECK-LABEL: udiv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
%val4_tmp = udiv i64 %val0_tmp, %val1_tmp
; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_0
ret void
}

define void @sdiv_i64() {
; CHECK-LABEL: sdiv_i64:
%val0_tmp = load i64, i64* @var64_0
%val1_tmp = load i64, i64* @var64_1
%val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
store volatile i64 %val4_tmp, i64* @var64_1
ret void
}

define void @lsrv_i32() {
; CHECK-LABEL: lsrv_i32:
%val0_tmp = load i32, i32* @var32_0
%val1_tmp = load i32, i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = lshr i32 %val0_tmp, %val2_tmp
; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_0
ret void
}

define void @lslv_i32() {
; CHECK-LABEL: lslv_i32:
%val0_tmp = load i32, i32* @var32_0
%val1_tmp = load i32, i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = shl i32 %val0_tmp, %val2_tmp
; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_1
ret void
}

define void @rorv_i32() {
; CHECK-LABEL: rorv_i32:
%val0_tmp = load i32, i32* @var32_0
%val6_tmp = load i32, i32* @var32_1
%val1_tmp = add i32 1, %val6_tmp
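; (x >> n) | (x << (32 - n)) below is a rotate right by a variable amount and
; should be selected as a single ror.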
%val2_tmp = sub i32 32, %val1_tmp
%val3_tmp = shl i32 %val0_tmp, %val2_tmp
%val4_tmp = lshr i32 %val0_tmp, %val1_tmp
%val5_tmp = or i32 %val3_tmp, %val4_tmp
; CHECK: {{ror|rorv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val5_tmp, i32* @var32_0
ret void
}

define void @asrv_i32() {
; CHECK-LABEL: asrv_i32:
%val0_tmp = load i32, i32* @var32_0
%val1_tmp = load i32, i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
%val4_tmp = ashr i32 %val0_tmp, %val2_tmp
; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_1
ret void
}

define void @sdiv_i32() {
; CHECK-LABEL: sdiv_i32:
%val0_tmp = load i32, i32* @var32_0
%val1_tmp = load i32, i32* @var32_1
%val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_1
ret void
}

define void @udiv_i32() {
; CHECK-LABEL: udiv_i32:
%val0_tmp = load i32, i32* @var32_0
%val1_tmp = load i32, i32* @var32_1
%val4_tmp = udiv i32 %val0_tmp, %val1_tmp
; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
store volatile i32 %val4_tmp, i32* @var32_0
ret void
}

; The point of this test is that we may not actually see (shl GPR32:$Val, (zext GPR32:$Val2))
; in the DAG (the RHS may be natively 64-bit), but we should still use the lsl instructions.
define i32 @test_lsl32() {
; CHECK-LABEL: test_lsl32:
%val = load i32, i32* @var32_0
%ret = shl i32 1, %val
; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}

define i32 @test_lsr32() {
; CHECK-LABEL: test_lsr32:
%val = load i32, i32* @var32_0
%ret = lshr i32 1, %val
; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}

define i32 @test_asr32(i32 %in) {
; CHECK-LABEL: test_asr32:
%val = load i32, i32* @var32_0
%ret = ashr i32 %in, %val
; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %ret
}