; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
; RUN:   -verify-machineinstrs | FileCheck %s
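
; Loads and stores of the scalable mask types <vscale x N x i1>, for N from 64
; down to 1, should round-trip through the mask load/store instructions
; vlm.v/vsm.v. The vsetvli before them is expected at SEW=8 with the LMUL that
; an e8 vector of the same element count would use: m8 for nxv64i1 down to mf8
; for nxv1i1.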
define void @test_load_mask_64(<vscale x 64 x i1>* %pa, <vscale x 64 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 64 x i1>, <vscale x 64 x i1>* %pa
  store <vscale x 64 x i1> %a, <vscale x 64 x i1>* %pb
  ret void
}

define void @test_load_mask_32(<vscale x 32 x i1>* %pa, <vscale x 32 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 32 x i1>, <vscale x 32 x i1>* %pa
  store <vscale x 32 x i1> %a, <vscale x 32 x i1>* %pb
  ret void
}

define void @test_load_mask_16(<vscale x 16 x i1>* %pa, <vscale x 16 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 16 x i1>, <vscale x 16 x i1>* %pa
  store <vscale x 16 x i1> %a, <vscale x 16 x i1>* %pb
  ret void
}

define void @test_load_mask_8(<vscale x 8 x i1>* %pa, <vscale x 8 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 8 x i1>, <vscale x 8 x i1>* %pa
  store <vscale x 8 x i1> %a, <vscale x 8 x i1>* %pb
  ret void
}

define void @test_load_mask_4(<vscale x 4 x i1>* %pa, <vscale x 4 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 4 x i1>, <vscale x 4 x i1>* %pa
  store <vscale x 4 x i1> %a, <vscale x 4 x i1>* %pb
  ret void
}

define void @test_load_mask_2(<vscale x 2 x i1>* %pa, <vscale x 2 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 2 x i1>, <vscale x 2 x i1>* %pa
  store <vscale x 2 x i1> %a, <vscale x 2 x i1>* %pb
  ret void
}

define void @test_load_mask_1(<vscale x 1 x i1>* %pa, <vscale x 1 x i1>* %pb) {
; CHECK-LABEL: test_load_mask_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 1 x i1>, <vscale x 1 x i1>* %pa
  store <vscale x 1 x i1> %a, <vscale x 1 x i1>* %pb
  ret void
}