File: vmv0-elimination.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -o - -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

; We have an invariant that a vmv0 use must not clobber a definition of v0 that is still live.
; Check that %asm2 has a $v0 = COPY just before it so that %x doesn't clobber it.
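; %asm1 defines v0 directly, the masked vadd needs %mask in v0, and %asm2 reads %asm1's
; result from v0, so the mask and %asm1's result are each copied out of v0 (vmv1r.v to
; v10/v11 below) and copied back into v0 immediately before the instruction that uses them.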
define <vscale x 1 x i64> @between_inline_asm(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, ptr %p) {
; CHECK-LABEL: between_inline_asm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v0, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vv v9, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vs1r.v v9, (a0)
; CHECK-NEXT:    ret
  %asm1 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "={v0},^vr,^vr"(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b)
  %x = call <vscale x 1 x i64> @llvm.riscv.vadd.mask(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i64 -1, i64 0)
  store <vscale x 1 x i64> %x, ptr %p
  %asm2 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,{v0}"(<vscale x 1 x i64> %a, <vscale x 1 x i64> %asm1)
  ret <vscale x 1 x i64> %asm2
}