File: combine-splats.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s

; fold (and (or x, C), D) -> D if (C & D) == D

define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
; CHECK-LABEL: and_or_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    ret
  %v1 = or <vscale x 4 x i32> %A, splat (i32 255)
  %v2 = and <vscale x 4 x i32> %v1, splat (i32 8)
  ret <vscale x 4 x i32> %v2
}
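; Worked instance of the fold above: with C = 255 and D = 8 we have
; 255 & 8 == 8, so (x | 255) & 8 == 8 for any x and the whole expression
; becomes splat (i32 8), emitted as the single vmv.v.i above.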

; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) iff (c1 & c2) != 0

define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vor.vi v8, v8, 3
; CHECK-NEXT:    vand.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 7)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}
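; Worked instance: c1 = 7, c2 = 3, 7 & 3 == 3 != 0 and 7 | 3 == 7, so
; (x & 7) | 3 is rewritten as (x | 3) & 7, matching the vor.vi/vand.vi pair.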

; If all masked bits are going to be set, that's a constant fold.

define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64_fold:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 1)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}
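; Worked instance: the only bit kept by the mask 1 (bit 0) is also set by the
; OR constant 3, so ((x & 1) | 3) == 3 for any x and the result constant-folds
; to splat (i64 3).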

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))

define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_shl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = shl <vscale x 4 x i32> %x, splat (i32 2)
  %v2 = shl <vscale x 4 x i32> %v1, splat (i32 4)
  ret <vscale x 4 x i32> %v2
}
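; Worked instance: the shift amounts combine as 2 + 4 = 6, so a single
; vsll.vi by 6 suffices.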

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))

define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_ashr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = ashr <vscale x 2 x i32> %x, splat (i32 2)
  %v2 = ashr <vscale x 2 x i32> %v1, splat (i32 4)
  ret <vscale x 2 x i32> %v2
}
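; As above, 2 + 4 = 6, emitted as one vsra.vi.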

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))

define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
; CHECK-LABEL: combine_vec_lshr_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %v1 = lshr <vscale x 8 x i16> %x, splat (i16 4)
  %v2 = lshr <vscale x 8 x i16> %v1, splat (i16 4)
  ret <vscale x 8 x i16> %v2
}
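; Worked instance: 4 + 4 = 8, still below the i16 element width of 16, so a
; single vsrl.vi by 8 is equivalent.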

; fold (fmul x, 1.0) -> x
define <vscale x 2 x float> @combine_fmul_one(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> %x, splat (float 1.0)
  ret <vscale x 2 x float> %v
}
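; Multiplying by a splat of 1.0 is an identity, so no vector instruction is
; emitted and the function lowers to a bare ret.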

; fold (fmul 1.0, x) -> x
define <vscale x 2 x float> @combine_fmul_one_commuted(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one_commuted:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> splat (float 1.0), %x
  ret <vscale x 2 x float> %v
}