File: masked-vslide1down-rv32.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i32,
  i32);
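; Operand order for the masked intrinsic: merge (passthru), source vector,
; 64-bit scalar, mask, AVL, and a policy immediate. The policy immediate
; encodes tail/mask agnosticism (0 = tumu, 1 = tamu, 2 = tuma, 3 = tama),
; matching the suffixes in the test names below.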

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 0)

  ret <vscale x 1 x i64> %a
}
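; On riscv32 the i64 scalar is passed split across a0 (low word) and a1
; (high word), so the lowering computes VL at e64, doubles it, switches to
; e32, and performs two vslide1down.vx steps before merging the result under
; the original mask back at e64.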

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}
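; With a tail-agnostic policy the final vsetvli for the vmerge uses "ta"
; rather than "tu"; compare the last vsetvli here with the tumu variant above.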


; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask-agnostic policies.
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 2)

  ret <vscale x 1 x i64> %a
}

; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask-agnostic policies.
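; With both tail and mask agnostic and an undef merge operand, no vmerge is
; needed: the slides below write the result register directly.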
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    i32 %3, i32 3)

  ret <vscale x 1 x i64> %a
}

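; Same as the tama case above, but the mask operand itself is undef; this
; should likewise lower to the unmasked form.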
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> undef,
    i32 %2, i32 3)

  ret <vscale x 1 x i64> %a
}