File: gep.ll

package info (click to toggle)
llvm-toolchain-17 1%3A17.0.6-22
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 1,799,624 kB
  • sloc: cpp: 6,428,607; ansic: 1,383,196; asm: 793,408; python: 223,504; objc: 75,364; f90: 60,502; lisp: 33,869; pascal: 15,282; sh: 9,684; perl: 7,453; ml: 4,937; awk: 3,523; makefile: 2,889; javascript: 2,149; xml: 888; fortran: 619; cs: 573
file content (86 lines) | stat: -rw-r--r-- 2,753 bytes parent folder | download | duplicates (10)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v \
; RUN: -riscv-v-slp-max-vf=0 -S | FileCheck %s

; This should not be vectorized, as the cost of computing the offsets nullifies
; the benefits of vectorizing:
;
; copy_with_offset_v2i8:
;         addi    a0, a0, 8
;         vsetivli        zero, 2, e8, mf8, ta, ma
;         vle8.v  v8, (a0)
;         addi    a1, a1, 16
;         vse8.v  v8, (a1)
;         ret
;
; Compared to the scalar version where the offsets can be folded into the
; addressing mode:
;
; copy_with_offset_v2i8:
;         lbu     a2, 8(a0)
;         lbu     a0, 9(a0)
;         sb      a2, 16(a1)
;         sb      a0, 17(a1)
;         ret

define void @copy_with_offset_v2i8(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @copy_with_offset_v2i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 8
; CHECK-NEXT:    [[X1:%.*]] = load i8, ptr [[P1]], align 1
; CHECK-NEXT:    [[Q1:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 16
; CHECK-NEXT:    store i8 [[X1]], ptr [[Q1]], align 1
; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 9
; CHECK-NEXT:    [[X2:%.*]] = load i8, ptr [[P2]], align 1
; CHECK-NEXT:    [[Q2:%.*]] = getelementptr i8, ptr [[Q]], i32 17
; CHECK-NEXT:    store i8 [[X2]], ptr [[Q2]], align 1
; CHECK-NEXT:    ret void
;
entry:
  ; Two consecutive byte copies: p[8..9] -> q[16..17]. The CHECK lines
  ; above assert this stays SCALAR after SLP, since the GEP/address
  ; setup cost outweighs a 2-element vector copy (rationale above).
  %p1 = getelementptr i8, ptr %p, i32 8
  %x1 = load i8, ptr %p1
  %q1 = getelementptr i8, ptr %q, i32 16
  store i8 %x1, ptr %q1

  %p2 = getelementptr i8, ptr %p, i32 9
  %x2 = load i8, ptr %p2
  %q2 = getelementptr i8, ptr %q, i32 17
  store i8 %x2, ptr %q2

  ret void
}

; This on the other hand, should be vectorized as the vector savings outweigh
; the GEP costs.
define void @copy_with_offset_v4i8(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @copy_with_offset_v4i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 8
; CHECK-NEXT:    [[Q1:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 16
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[P1]], align 1
; CHECK-NEXT:    store <4 x i8> [[TMP0]], ptr [[Q1]], align 1
; CHECK-NEXT:    ret void
;
entry:
  ; Four consecutive byte copies: p[8..11] -> q[16..19]. The CHECK
  ; lines above assert SLP DOES vectorize this into a single
  ; <4 x i8> load/store pair, keeping only the two base GEPs.
  %p1 = getelementptr i8, ptr %p, i32 8
  %x1 = load i8, ptr %p1
  %q1 = getelementptr i8, ptr %q, i32 16
  store i8 %x1, ptr %q1

  %p2 = getelementptr i8, ptr %p, i32 9
  %x2 = load i8, ptr %p2
  %q2 = getelementptr i8, ptr %q, i32 17
  store i8 %x2, ptr %q2

  %p3 = getelementptr i8, ptr %p, i32 10
  %x3 = load i8, ptr %p3
  %q3 = getelementptr i8, ptr %q, i32 18
  store i8 %x3, ptr %q3

  %p4 = getelementptr i8, ptr %p, i32 11
  %x4 = load i8, ptr %p4
  %q4 = getelementptr i8, ptr %q, i32 19
  store i8 %x4, ptr %q4

  ret void
}