; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
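
; Select between x and zero under (x < 42), sign-extend the result, and add it
; to the wide operand y. The masked select should fold into a masked vwadd.wv.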
define <8 x i64> @vwadd_wv_mask_v8i32(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vwadd.wv v12, v12, v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
%a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
%sa = sext <8 x i32> %a to <8 x i64>
%ret = add <8 x i64> %sa, %y
ret <8 x i64> %ret
}
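
; Same pattern as above but with zero-extension, folding into a masked vwaddu.wv.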
define <8 x i64> @vwaddu_wv_mask_v8i32(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwaddu_wv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vwaddu.wv v12, v12, v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
%a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
%sa = zext <8 x i32> %a to <8 x i64>
%ret = add <8 x i64> %sa, %y
ret <8 x i64> %ret
}
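
; Both operands are narrow and zero-extended. The select is lowered as a vmerge
; and the addition uses an unmasked vwaddu.vv.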
define <8 x i64> @vwaddu_vv_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: vwaddu_vv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vwaddu.vv v12, v8, v10
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
%a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
%sa = zext <8 x i32> %a to <8 x i64>
%sy = zext <8 x i32> %y to <8 x i64>
%ret = add <8 x i64> %sa, %sy
ret <8 x i64> %ret
}
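
; Same as vwadd_wv_mask_v8i32 but with the add operands commuted (y + sa);
; the fold to a masked vwadd.wv still applies.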
define <8 x i64> @vwadd_wv_mask_v8i32_commutative(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32_commutative:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT: vwadd.wv v12, v12, v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
%a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
%sa = sext <8 x i32> %a to <8 x i64>
%ret = add <8 x i64> %y, %sa
ret <8 x i64> %ret
}
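
; The select's false value is 1 rather than 0, so the masked fold does not
; apply: the select stays as a vmerge feeding an unmasked vwadd.wv.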
define <8 x i64> @vwadd_wv_mask_v8i32_nonzero(<8 x i32> %x, <8 x i64> %y) {
; CHECK-LABEL: vwadd_wv_mask_v8i32_nonzero:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vmv.v.i v10, 1
; CHECK-NEXT: vmerge.vvm v16, v10, v8, v0
; CHECK-NEXT: vwadd.wv v8, v12, v16
; CHECK-NEXT: ret
%mask = icmp slt <8 x i32> %x, <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
%a = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%sa = sext <8 x i32> %a to <8 x i64>
%ret = add <8 x i64> %y, %sa
ret <8 x i64> %ret
}