File: inlining-alignment-assumptions.ll

Package: llvm-toolchain-11 1:11.0.1-2

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=0 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=1 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-ON
; RUN: opt -S -O2 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF
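; The first two RUN lines pin the flag to each setting explicitly; the third
; runs without the flag to check that the current default behaves like
; ASSUMPTIONS-OFF.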

target datalayout = "e-p:64:64-p5:32:32-A5"
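
; In this datalayout, p5:32:32 gives address space 5 32-bit pointers and A5
; declares address space 5 as the alloca address space (an AMDGPU-style
; layout), which @caller2 below relies on.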

; This illustrates an optimization difference caused by instruction counting
; heuristics, which are affected by the additional instructions of the
; alignment assumption.

define internal i1 @callee1(i1 %c, i64* align 8 %ptr) {
  store volatile i64 0, i64* %ptr
  ret i1 %c
}
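
; With -preserve-alignment-assumptions-during-inlining=1, inlining @callee1
; materializes its align 8 parameter attribute as an explicit check at the
; call site. A sketch of the emitted sequence (visible verbatim in the
; ASSUMPTIONS-ON check lines below):
;
;   %ptrint = ptrtoint i64* %ptr to i64
;   %maskedptr = and i64 %ptrint, 7        ; align 8 - 1
;   %maskcond = icmp eq i64 %maskedptr, 0
;   call void @llvm.assume(i1 %maskcond)
;
; These four extra instructions are what the instruction counting heuristics
; see, and instcombine later uses the assume to raise the caller's store
; alignments from 4 to 8.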

define void @caller1(i1 %c, i64* align 1 %ptr) {
; ASSUMPTIONS-OFF-LABEL: @caller1(
; ASSUMPTIONS-OFF-NEXT:    br i1 [[C:%.*]], label [[TRUE2:%.*]], label [[FALSE2:%.*]]
; ASSUMPTIONS-OFF:       true2:
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 0, i64* [[PTR:%.*]], align 8
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 2, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    ret void
; ASSUMPTIONS-OFF:       false2:
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 3, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    ret void
;
; ASSUMPTIONS-ON-LABEL: @caller1(
; ASSUMPTIONS-ON-NEXT:    br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; ASSUMPTIONS-ON:       false1:
; ASSUMPTIONS-ON-NEXT:    store volatile i64 1, i64* [[PTR:%.*]], align 8
; ASSUMPTIONS-ON-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    ret void
; ASSUMPTIONS-ON:       true2.critedge:
; ASSUMPTIONS-ON-NEXT:    [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT:    [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
; ASSUMPTIONS-ON-NEXT:    [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
; ASSUMPTIONS-ON-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND_C]])
; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    ret void
;
  br i1 %c, label %true1, label %false1

true1:
  %c2 = call i1 @callee1(i1 %c, i64* %ptr)
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  br i1 %c2, label %true2, label %false2

false1:
  store volatile i64 1, i64* %ptr
  br label %true1

true2:
  store volatile i64 2, i64* %ptr
  ret void

false2:
  store volatile i64 3, i64* %ptr
  ret void
}

; This test checks that alignment assumptions do not prevent SROA.
; See PR45763.
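; With assumptions disabled, @caller2 folds to a plain ret void: SROA deletes
; the dead alloca. With assumptions enabled, the inlined alignment check
; introduces a ptrtoint user of the alloca's addrspacecast, e.g.
;
;   %ptrint = ptrtoint i64* %cast to i64
;
; which SROA cannot see through, so the alloca and the assume survive (see
; the ASSUMPTIONS-ON check lines below).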

define internal void @callee2(i64* noalias sret align 8 %arg) {
  store i64 0, i64* %arg, align 8
  ret void
}

define amdgpu_kernel void @caller2() {
; ASSUMPTIONS-OFF-LABEL: @caller2(
; ASSUMPTIONS-OFF-NEXT:    ret void
;
; ASSUMPTIONS-ON-LABEL: @caller2(
; ASSUMPTIONS-ON-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
; ASSUMPTIONS-ON-NEXT:    [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
; ASSUMPTIONS-ON-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
; ASSUMPTIONS-ON-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT:    ret void
;
  %alloca = alloca i64, align 8, addrspace(5)
  %cast = addrspacecast i64 addrspace(5)* %alloca to i64*
  call void @callee2(i64* sret align 8 %cast)
  ret void
}