; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
declare i16 @llvm.ushl.sat.i16(i16, i16)
declare <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16>, <2 x i16>)
; fold (shlsat undef, x) -> 0
define i16 @combine_shl_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
  ; Undef shiftee: DAG combiner folds the saturating shift to the constant 0,
  ; so codegen is a bare "mov w0, wzr" with no ushl.sat expansion.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 undef, i16 %y)
  ret i16 %tmp
}
; fold (shlsat x, undef) -> undef
define i16 @combine_shl_by_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
  ; Undef shift amount: the whole shift folds to undef, so no instructions
  ; are emitted before the return (w0 is left as whatever arrived in it).
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 undef)
  ret i16 %tmp
}
; fold (shlsat poison, x) -> 0
define i16 @combine_shl_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_poison:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
  ; Poison shiftee: folded the same way as the undef case above, producing a
  ; constant 0 result.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 poison, i16 %y)
  ret i16 %tmp
}
; fold (shlsat x, poison) -> undef
define i16 @combine_shl_by_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_poison:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
  ; Poison shift amount: folds to undef, so the function body is just a ret.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 poison)
  ret i16 %tmp
}
; fold (shlsat x, bitwidth) -> undef
define i16 @combine_shl_by_bitwidth(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_bitwidth:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
  ; Shift amount equal to the bit width (16) is out of range, so the shift
  ; folds to undef and no code is emitted.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 16)
  ret i16 %tmp
}
; fold (ushlsat 0, x) -> 0
define i16 @combine_shl_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_zero:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
  ; Shifting the constant 0 yields 0 regardless of the amount; expect a plain
  ; zero move.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 0, i16 %y)
  ret i16 %tmp
}
; fold (ushlsat x, 0) -> x
define i16 @combine_shlsat_by_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_by_zero:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
  ; Shift by 0 is the identity: %x arrives in w0 and is returned untouched,
  ; so only the ret remains.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 0)
  ret i16 %tmp
}
; fold (ushlsat c1, c2) -> c3
define i16 @combine_shlsat_constfold(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_constfold:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w0, #32
; CHECK-NEXT: ret
  ; Non-saturating constant fold: 8 << 2 = 32 fits in i16, so the result is
  ; materialized directly as an immediate.
  %tmp = call i16 @llvm.ushl.sat.i16(i16 8, i16 2)
  ret i16 %tmp
}
; fold (ushlsat c1, c2) -> sat max
define i16 @combine_shlsat_satmax(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_satmax:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w0, #65535
; CHECK-NEXT: ret
  ; Saturating constant fold: 8 << 15 overflows i16, so the unsigned result
  ; clamps to the maximum 0xFFFF (65535).
  %tmp = call i16 @llvm.ushl.sat.i16(i16 8, i16 15)
  ret i16 %tmp
}
declare void @sink2xi16(i16, i16)
; fold (ushlsat c1, c2) -> c3 , c1/c2/c3 being vectors
define void @combine_shlsat_vector() nounwind {
; CHECK-LABEL: combine_shlsat_vector:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: mov w0, #32
; CHECK-NEXT: mov w1, #65535
; CHECK-NEXT: bl sink2xi16
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  ; Vector constant fold: lane 0 is 8 << 2 = 32 (no saturation), lane 1 is
  ; 8 << 15 which saturates to 65535. The folded lanes are expected to show
  ; up as the two immediate arguments (w0/w1) of the sink call.
  %tmp = call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16><i16 8, i16 8>,
                                             <2 x i16><i16 2, i16 15>)
  ; Pass elements as arguments in a call to get CHECK statements that verify
  ; the constant folding.
  %e0 = extractelement <2 x i16> %tmp, i16 0
  %e1 = extractelement <2 x i16> %tmp, i16 1
  call void @sink2xi16(i16 %e0, i16 %e1)
  ret void
}