File: vecreduce-of-cast.ll

package info (click to toggle)
llvm-toolchain-19 1%3A19.1.7-3~deb12u1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm-proposed-updates
  • size: 1,998,492 kB
  • sloc: cpp: 6,951,680; ansic: 1,486,157; asm: 913,598; python: 232,024; f90: 80,126; objc: 75,281; lisp: 37,276; pascal: 16,990; sh: 10,009; ml: 5,058; perl: 4,724; awk: 3,523; makefile: 3,167; javascript: 2,504; xml: 892; fortran: 664; cs: 573
file content (140 lines) | stat: -rw-r--r-- 5,350 bytes parent folder | download | duplicates (7)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv32 -mattr=+v | FileCheck  %s
; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv64 -mattr=+v | FileCheck  %s

;
; Fold reduce(cast(X)) -> trunc(cast(X)) if more cost efficient
;

; reduce.add(trunc(<8 x i64> -> <8 x i32>)): the CHECK lines pin the fold to a
; full-width v8i64 add-reduction followed by a scalar trunc to i32.
define i32 @reduce_add_trunc_v8i64_to_v8i32(<8 x i64> %a0)  {
; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)
  ret i32 %red
}

; Same fold as above but truncating to i16: reduce in i64, then scalar trunc.
define i16 @reduce_add_trunc_v8i64_to_v8i16(<8 x i64> %a0)  {
; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i16
; CHECK-NEXT:    ret i16 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i16>
  %red = tail call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tr)
  ret i16 %red
}

; Same fold truncating all the way to i8: reduce in i64, then scalar trunc.
define i8 @reduce_add_trunc_v8i64_to_v8i8(<8 x i64> %a0)  {
; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i8>
  %red = tail call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %tr)
  ret i8 %red
}

; reduce.or(trunc(<8 x i32> -> <8 x i8>)): folds to a v8i32 or-reduction plus a
; scalar trunc to i8.
; Fix: the call previously named the intrinsic @llvm.vector.reduce.or.v8i32
; while passing a <8 x i8> operand. Overloaded-intrinsic name mangling must
; match the operand type (.v8i8 — declared later in this file); the old
; spelling only parsed because LLVM auto-upgrade remangles mismatched names.
define i8 @reduce_or_trunc_v8i32_i8(<8 x i32> %a0)  {
; CHECK-LABEL: @reduce_or_trunc_v8i32_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i32 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <8 x i32> %a0 to <8 x i8>
  %red = tail call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %tr)
  ret i8 %red
}

; Wider element count (v16): reduce.xor of trunc(<16 x i64> -> <16 x i8>)
; folds to a v16i64 xor-reduction plus scalar trunc to i8.
define i8 @reduce_xor_trunc_v16i64_i8(<16 x i64> %a0)  {
; CHECK-LABEL: @reduce_xor_trunc_v16i64_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <16 x i64> %a0 to <16 x i8>
  %red = tail call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %tr)
  ret i8 %red
}

; reduce.mul case: the CHECK lines show the IR is left UNCHANGED — the fold is
; not applied for mul here (presumably not profitable/legal for this opcode on
; this target; the checks pin the unmodified form).
define i16 @reduce_mul_trunc_v8i64_i16(<8 x i64> %a0)  {
; CHECK-LABEL: @reduce_mul_trunc_v8i64_i16(
; CHECK-NEXT:    [[TR:%.*]] = trunc <8 x i64> [[A0:%.*]] to <8 x i16>
; CHECK-NEXT:    [[RED:%.*]] = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> [[TR]])
; CHECK-NEXT:    ret i16 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i16>
  %red = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %tr)
  ret i16 %red
}

; Cast direction reversed (extend instead of trunc): reduce.or(sext(<8 x i8>))
; folds to a narrow v8i8 or-reduction followed by a scalar sext to i32.
define i32 @reduce_or_sext_v8i8_to_v8i32(<8 x i8> %a0)  {
; CHECK-LABEL: @reduce_or_sext_v8i8_to_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = sext i8 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = sext <8 x i8> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
  ret i32 %red
}

; Same sext fold from an i16 source: narrow v8i16 or-reduction, scalar sext.
define i32 @reduce_or_sext_v8i16_to_v8i32(<8 x i16> %a0)  {
; CHECK-LABEL: @reduce_or_sext_v8i16_to_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = sext <8 x i16> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
  ret i32 %red
}

; zext variant: reduce.or(zext(<8 x i8>)) folds to a narrow v8i8 or-reduction
; followed by a scalar zext to i32.
define i32 @reduce_or_zext_v8i8_to_v8i32(<8 x i8> %a0)  {
; CHECK-LABEL: @reduce_or_zext_v8i8_to_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = zext <8 x i8> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
  ret i32 %red
}

; Same zext fold from an i16 source: narrow v8i16 or-reduction, scalar zext.
define i32 @reduce_or_zext_v8i16_to_v8i32(<8 x i16> %a0)  {
; CHECK-LABEL: @reduce_or_zext_v8i16_to_v8i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = zext <8 x i16> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
  ret i32 %red
}

; Negative case - narrowing the reduce (to i8) is illegal: an i8 add-reduction
; of 8 elements can wrap, so the i32 result would differ. The CHECK lines pin
; the IR as unchanged.
; TODO: We could narrow to i16 instead.
define i32 @reduce_add_trunc_v8i8_to_v8i32(<8 x i8> %a0)  {
; CHECK-LABEL: @reduce_add_trunc_v8i8_to_v8i32(
; CHECK-NEXT:    [[TR:%.*]] = zext <8 x i8> [[A0:%.*]] to <8 x i32>
; CHECK-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TR]])
; CHECK-NEXT:    ret i32 [[RED]]
;
  %tr = zext <8 x i8> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)
  ret i32 %red
}


; Declarations for the overloaded vector-reduction intrinsics called above.
; NOTE(review): @llvm.vector.reduce.and.v16i16 has no caller in this file as
; visible here — possibly left over from a removed test; harmless to keep.
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)