File: remat.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,POSTRA
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-vsetvl-after-rvv-regalloc=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,PRERA

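; vid.v reads no registers or memory, so it should be cheaper to
; rematerialize than to spill. In the first RUN configuration (POSTRA,
; vsetvli insertion after RVV register allocation), the second vid.v in the
; output shows the value being recomputed rather than spilled; with
; -riscv-vsetvl-after-rvv-regalloc=false (PRERA), the value is instead
; spilled to the stack and reloaded.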
define void @vid(ptr %p) {
; POSTRA-LABEL: vid:
; POSTRA:       # %bb.0:
; POSTRA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; POSTRA-NEXT:    vid.v v8
; POSTRA-NEXT:    vs8r.v v8, (a0)
; POSTRA-NEXT:    vl8re64.v v16, (a0)
; POSTRA-NEXT:    vl8re64.v v24, (a0)
; POSTRA-NEXT:    vl8re64.v v0, (a0)
; POSTRA-NEXT:    vl8re64.v v8, (a0)
; POSTRA-NEXT:    vs8r.v v8, (a0)
; POSTRA-NEXT:    vs8r.v v0, (a0)
; POSTRA-NEXT:    vs8r.v v24, (a0)
; POSTRA-NEXT:    vs8r.v v16, (a0)
; POSTRA-NEXT:    vid.v v8
; POSTRA-NEXT:    vs8r.v v8, (a0)
; POSTRA-NEXT:    ret
;
; PRERA-LABEL: vid:
; PRERA:       # %bb.0:
; PRERA-NEXT:    addi sp, sp, -16
; PRERA-NEXT:    .cfi_def_cfa_offset 16
; PRERA-NEXT:    csrr a1, vlenb
; PRERA-NEXT:    slli a1, a1, 3
; PRERA-NEXT:    sub sp, sp, a1
; PRERA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; PRERA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; PRERA-NEXT:    vid.v v8
; PRERA-NEXT:    vs8r.v v8, (a0)
; PRERA-NEXT:    vl8re64.v v16, (a0)
; PRERA-NEXT:    addi a1, sp, 16
; PRERA-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; PRERA-NEXT:    vl8re64.v v24, (a0)
; PRERA-NEXT:    vl8re64.v v0, (a0)
; PRERA-NEXT:    vl8re64.v v16, (a0)
; PRERA-NEXT:    vs8r.v v16, (a0)
; PRERA-NEXT:    vs8r.v v0, (a0)
; PRERA-NEXT:    vs8r.v v24, (a0)
; PRERA-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
; PRERA-NEXT:    vs8r.v v16, (a0)
; PRERA-NEXT:    vs8r.v v8, (a0)
; PRERA-NEXT:    csrr a0, vlenb
; PRERA-NEXT:    slli a0, a0, 3
; PRERA-NEXT:    add sp, sp, a0
; PRERA-NEXT:    addi sp, sp, 16
; PRERA-NEXT:    ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> poison, i64 -1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

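  ; The four volatile loads below keep four m8 values (v8, v16, v24, v0)
  ; live at once, occupying all 32 vector registers; keeping %vid live as
  ; well requires either a spill or a rematerialization.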
  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}


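; Unlike @vid, this vid.v has a non-poison passthru operand (and a VL of 1
; with a tail-undisturbed policy), so its result depends on %v and cannot
; simply be rematerialized. Both RUN configurations therefore produce the
; same spill/reload sequence, covered by the shared CHECK prefix.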
define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
; CHECK-LABEL: vid_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, tu, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> %v, i64 1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}