; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64-none-linux-gnu < %s | FileCheck %s

declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1 immarg)
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1 immarg)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
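
; A sum-of-absolute-differences over zero-extended v16i8 inputs should lower
; to uabdl/uabal2 feeding a single uaddlv reduction.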
define i32 @test_sad_v16i8_zext(ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test_sad_v16i8_zext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ldr q1, [x1]
; CHECK-NEXT:    uabdl v2.8h, v1.8b, v0.8b
; CHECK-NEXT:    uabal2 v2.8h, v1.16b, v0.16b
; CHECK-NEXT:    uaddlv s0, v2.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %0 = load <16 x i8>, ptr %a
  %1 = zext <16 x i8> %0 to <16 x i32>
  %2 = load <16 x i8>, ptr %b
  %3 = zext <16 x i8> %2 to <16 x i32>
  %4 = sub nsw <16 x i32> %3, %1
  %5 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
  ret i32 %6
}
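
; Same SAD pattern with sign-extended inputs: expect sabdl/sabal2 followed by
; the same uaddlv reduction.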
define i32 @test_sad_v16i8_sext(ptr nocapture readonly %a, ptr nocapture readonly %b) {
; CHECK-LABEL: test_sad_v16i8_sext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ldr q1, [x1]
; CHECK-NEXT:    sabdl v2.8h, v1.8b, v0.8b
; CHECK-NEXT:    sabal2 v2.8h, v1.16b, v0.16b
; CHECK-NEXT:    uaddlv s0, v2.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %0 = load <16 x i8>, ptr %a
  %1 = sext <16 x i8> %0 to <16 x i32>
  %2 = load <16 x i8>, ptr %b
  %3 = sext <16 x i8> %2 to <16 x i32>
  %4 = sub nsw <16 x i32> %3, %1
  %5 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
  ret i32 %6
}
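
; Two-step variant: the absolute difference is computed on i16 and only then
; zero-extended to i32 for the reduction. This should still be recognized as a
; SAD and lowered to the same uabdl/uabal2 + uaddlv sequence.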
define i32 @test_sad_v16i8_two_step_zext(ptr noundef readonly %a, ptr noundef readonly %b) {
; CHECK-LABEL: test_sad_v16i8_two_step_zext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ldr q1, [x1]
; CHECK-NEXT:    uabdl v2.8h, v1.8b, v0.8b
; CHECK-NEXT:    uabal2 v2.8h, v1.16b, v0.16b
; CHECK-NEXT:    uaddlv s0, v2.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %0 = load <16 x i8>, ptr %a
  %1 = zext <16 x i8> %0 to <16 x i16>
  %2 = load <16 x i8>, ptr %b
  %3 = zext <16 x i8> %2 to <16 x i16>
  %4 = sub nsw <16 x i16> %3, %1
  %5 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %4, i1 false)
  %6 = zext <16 x i16> %5 to <16 x i32>
  %7 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
  ret i32 %7
}
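
; Two-step variant with sign-extended inputs: expect sabdl/sabal2 plus the
; same uaddlv reduction.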
define i32 @test_sad_v16i8_two_step_sext(ptr noundef readonly %a, ptr noundef readonly %b) {
; CHECK-LABEL: test_sad_v16i8_two_step_sext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ldr q1, [x1]
; CHECK-NEXT:    sabdl v2.8h, v1.8b, v0.8b
; CHECK-NEXT:    sabal2 v2.8h, v1.16b, v0.16b
; CHECK-NEXT:    uaddlv s0, v2.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %0 = load <16 x i8>, ptr %a
  %1 = sext <16 x i8> %0 to <16 x i16>
  %2 = load <16 x i8>, ptr %b
  %3 = sext <16 x i8> %2 to <16 x i16>
  %4 = sub nsw <16 x i16> %3, %1
  %5 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %4, i1 false)
  %6 = zext <16 x i16> %5 to <16 x i32>
  %7 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
  ret i32 %7
}