; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+xtheadmemidx -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+xtheadmemidx -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix RV64
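
; The loop below inserts a zero into a <8 x i8> vector at the variable index
; %const, which is lowered with vslideup.vx, while the scalar loads select the
; XTHeadMemIdx pre-increment and indexed forms th.lbib and th.lrb.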
define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-LABEL: test:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a2, 1
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: .LBB0_1: # %for.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: mv a4, a1
; RV32-NEXT: th.lbib a5, (a4), -1, 0
; RV32-NEXT: th.lrb a4, a4, a0, 0
; RV32-NEXT: vmv.v.x v8, a5
; RV32-NEXT: vmv.s.x v9, zero
; RV32-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; RV32-NEXT: vslideup.vx v8, v9, a2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV32-NEXT: vmv.s.x v8, a4
; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT: vmseq.vi v8, v8, 0
; RV32-NEXT: vmv.x.s a4, v8
; RV32-NEXT: andi a4, a4, 255
; RV32-NEXT: bnez a4, .LBB0_1
; RV32-NEXT: # %bb.2: # %if.then381
; RV32-NEXT: li a0, 0
; RV32-NEXT: ret
;
; RV64-LABEL: test:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: addi a3, a2, 1
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: .LBB0_1: # %for.body
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: mv a4, a1
; RV64-NEXT: th.lbib a5, (a4), -1, 0
; RV64-NEXT: th.lrb a4, a4, a0, 0
; RV64-NEXT: vmv.v.x v8, a5
; RV64-NEXT: vmv.s.x v9, zero
; RV64-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; RV64-NEXT: vslideup.vx v8, v9, a2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64-NEXT: vmv.s.x v8, a4
; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT: vmseq.vi v8, v8, 0
; RV64-NEXT: vmv.x.s a4, v8
; RV64-NEXT: andi a4, a4, 255
; RV64-NEXT: bnez a4, .LBB0_1
; RV64-NEXT: # %bb.2: # %if.then381
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %add.ptr1 = getelementptr i8, ptr %add.ptr, i32 -1
  %add.ptr2 = getelementptr i8, ptr %add.ptr1, i32 %size
  %0 = load i8, ptr %add.ptr1, align 1
  %1 = load i8, ptr %add.ptr2, align 1
  %2 = insertelement <8 x i8> poison, i8 %0, i64 0
  %3 = insertelement <8 x i8> %2, i8 0, i64 %const
  %4 = insertelement <8 x i8> %3, i8 %1, i64 0
  %5 = icmp ult <8 x i8> %4, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %6 = bitcast <8 x i1> %5 to i8
  %7 = zext i8 %6 to i32
  %cond = icmp eq i32 %7, 0
  br i1 %cond, label %if.then381, label %for.body

if.then381:                                       ; preds = %for.body
  ret i32 0
}