File: fixed-vectors-interleaved-access-zve32x.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zve32x,+zvl1024b -O2 | FileCheck %s -check-prefix=ZVE32X
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zve64x,+zvl1024b -O2 | FileCheck %s -check-prefix=ZVE64X
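; The two RUN lines differ only in ELEN: with +zve32x the maximum SEW is 32,
; so XLEN-sized (64-bit) pointer elements cannot use the vector unit's segment
; loads, while +zve64x permits SEW=64 and a single vlseg3e64.v.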

; TODO: Currently we don't lower interleaved accesses of ptr types if XLEN isn't
; a supported SEW. We should improve this with a wide load and a set of shuffles.
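;
; A minimal sketch of that improved lowering (hypothetical, assuming RV64 and
; little-endian layout; the backend does not emit this today): load the 96
; bytes as a vector type that is legal under zve32x and deinterleave with
; i32-lane shuffles, e.g.
;   %w  = load <24 x i32>, ptr %p
;   %f0 = shufflevector <24 x i32> %w, <24 x i32> poison,
;           <8 x i32> <i32 0, i32 1, i32 6, i32 7, i32 12, i32 13, i32 18, i32 19>
; where each pair of i32 lanes in %f0 holds one field-0 pointer, and field 1
; is extracted analogously from lanes {2,3}, {8,9}, {14,15}, {20,21}.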
define <4 x i1> @load_large_vector(ptr %p) {
; ZVE32X-LABEL: load_large_vector:
; ZVE32X:       # %bb.0:
; ZVE32X-NEXT:    ld a1, 80(a0)
; ZVE32X-NEXT:    ld a2, 72(a0)
; ZVE32X-NEXT:    ld a3, 56(a0)
; ZVE32X-NEXT:    ld a4, 32(a0)
; ZVE32X-NEXT:    ld a5, 24(a0)
; ZVE32X-NEXT:    ld a6, 48(a0)
; ZVE32X-NEXT:    ld a7, 8(a0)
; ZVE32X-NEXT:    ld a0, 0(a0)
; ZVE32X-NEXT:    xor a4, a5, a4
; ZVE32X-NEXT:    snez a4, a4
; ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmv.s.x v8, a4
; ZVE32X-NEXT:    vand.vi v8, v8, 1
; ZVE32X-NEXT:    vmsne.vi v0, v8, 0
; ZVE32X-NEXT:    vmv.s.x v9, zero
; ZVE32X-NEXT:    vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT:    xor a0, a0, a7
; ZVE32X-NEXT:    snez a0, a0
; ZVE32X-NEXT:    vmv.s.x v10, a0
; ZVE32X-NEXT:    vand.vi v10, v10, 1
; ZVE32X-NEXT:    vmsne.vi v0, v10, 0
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmv.v.i v10, 0
; ZVE32X-NEXT:    vmerge.vim v11, v10, 1, v0
; ZVE32X-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
; ZVE32X-NEXT:    vslideup.vi v11, v8, 1
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmsne.vi v0, v11, 0
; ZVE32X-NEXT:    xor a0, a6, a3
; ZVE32X-NEXT:    snez a0, a0
; ZVE32X-NEXT:    vmv.s.x v8, a0
; ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; ZVE32X-NEXT:    vand.vi v8, v8, 1
; ZVE32X-NEXT:    vmsne.vi v8, v8, 0
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmerge.vim v11, v10, 1, v0
; ZVE32X-NEXT:    vmv1r.v v0, v8
; ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
; ZVE32X-NEXT:    vslideup.vi v11, v8, 2
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmsne.vi v0, v11, 0
; ZVE32X-NEXT:    xor a1, a2, a1
; ZVE32X-NEXT:    snez a0, a1
; ZVE32X-NEXT:    vmv.s.x v8, a0
; ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; ZVE32X-NEXT:    vand.vi v8, v8, 1
; ZVE32X-NEXT:    vmsne.vi v8, v8, 0
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmerge.vim v10, v10, 1, v0
; ZVE32X-NEXT:    vmv1r.v v0, v8
; ZVE32X-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
; ZVE32X-NEXT:    vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT:    vslideup.vi v10, v8, 3
; ZVE32X-NEXT:    vmsne.vi v0, v10, 0
; ZVE32X-NEXT:    ret
;
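; Under zve32x the access is fully scalarized above: each pointer pair is
; loaded with ld, compared via xor+snez, and the i1 results are assembled
; into the <4 x i1> mask one lane at a time with vmv.s.x/vslideup/vmsne.
;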
; ZVE64X-LABEL: load_large_vector:
; ZVE64X:       # %bb.0:
; ZVE64X-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
; ZVE64X-NEXT:    vlseg3e64.v v8, (a0)
; ZVE64X-NEXT:    vmsne.vv v0, v8, v9
; ZVE64X-NEXT:    ret
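;
; The IR below models an array of three-pointer structs: one wide load of 12
; pointers, two stride-3 shuffles extracting fields 0 and 1, and a lane-wise
; inequality compare producing the <4 x i1> result.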
  %l = load <12 x ptr>, ptr %p
  %s1 = shufflevector <12 x ptr> %l, <12 x ptr> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %s2 = shufflevector <12 x ptr> %l, <12 x ptr> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
  %ret = icmp ne <4 x ptr> %s1, %s2
  ret <4 x i1> %ret
}