File: vscale-crashes.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

; Check that a call featuring a scalable-vector byval argument fed by a memcpy
; doesn't crash the compiler. The compiler previously assumed the byval type's
; size could be represented as a known constant amount.
define void @byval_caller(ptr %P) {
; CHECK-LABEL: @byval_caller(
; CHECK-NEXT:    [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A]], ptr align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    call void @byval_callee(ptr byval(<vscale x 1 x i8>) align 1 [[A]])
; CHECK-NEXT:    ret void
;
  %a = alloca i8
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %P, i64 8, i1 false)
  call void @byval_callee(ptr align 1 byval(<vscale x 1 x i8>) %a)
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr align 4, ptr align 4, i64, i1)
declare void @byval_callee(ptr align 1 byval(<vscale x 1 x i8>))

; Check that two scalable-vector stores (overlapping, at a constant offset) do
; not crash the compiler when it checks whether they can be merged into a
; single memset. The compiler previously assumed the stored values' sizes
; could be represented as a known constant amount.
define void @merge_stores_both_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_both_scalable(
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr.next
  ret void
}

; As above, but the base store is scalable and the subsequent store(s) are not.
define void @merge_stores_first_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_first_scalable(
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store i8 0, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store i8 zeroinitializer, ptr %ptr.next
  ret void
}

; As above, but the base store is not scalable and the subsequent store(s) are.
define void @merge_stores_second_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_second_scalable(
; CHECK-NEXT:    store i8 0, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store i8 zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr.next
  ret void
}

; Check that the call-slot optimization doesn't crash when encountering scalable types.
define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
; CHECK-LABEL: @callslotoptzn(
; CHECK-NEXT:    [[ALLOC:%.*]] = alloca <vscale x 4 x float>, align 16
; CHECK-NEXT:    [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds float, ptr [[ALLOC]], <vscale x 4 x i32> [[IDX]]
; CHECK-NEXT:    call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT:    [[LI:%.*]] = load <vscale x 4 x float>, ptr [[ALLOC]], align 4
; CHECK-NEXT:    store <vscale x 4 x float> [[LI]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %alloc = alloca <vscale x 4 x float>, align 16
  %idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
  %stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
  call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> splat (i1 true))
  %li = load <vscale x 4 x float>, ptr %alloc, align 4
  store <vscale x 4 x float> %li, ptr %out, align 4
  ret void
}

%0 = type { <vscale x 8 x i8> }
%1 = type { <vscale x 8 x i8>, <vscale x 8 x i8> }

define void @memmove_vector(ptr %a, ptr %b) {
; CHECK-LABEL: @memmove_vector(
; CHECK-NEXT:    [[V:%.*]] = load <vscale x 8 x i8>, ptr [[A:%.*]], align 1
; CHECK-NEXT:    store <vscale x 8 x i8> [[V]], ptr [[B:%.*]], align 1
; CHECK-NEXT:    ret void
;
  %v = load <vscale x 8 x i8>, ptr %a, align 1
  store <vscale x 8 x i8> %v, ptr %b, align 1
  ret void
}

define void @memmove_agg1(ptr %a, ptr %b) {
; CHECK-LABEL: @memmove_agg1(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[B:%.*]], ptr align 1 [[A:%.*]], i64 [[TMP2]], i1 false)
; CHECK-NEXT:    ret void
;
  %v = load %0, ptr %a, align 1
  store %0 %v, ptr %b, align 1
  ret void
}

define void @memmove_agg2(ptr %a, ptr %b) {
; CHECK-LABEL: @memmove_agg2(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr align 1 [[B:%.*]], ptr align 1 [[A:%.*]], i64 [[TMP2]], i1 false)
; CHECK-NEXT:    ret void
;
  %v = load %1, ptr %a, align 1
  store %1 %v, ptr %b, align 1
  ret void
}

declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
declare void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)