; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s
; This test checks that there is no assertion failure caused by the issue described in PR35432.
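; The IR below contains a nested loop: the inner loop (for.body8) increments an
; i32 counter while an i8 induction variable counts down from a truncated i32,
; so the vectorizer emits runtime SCEV overflow checks (see the vector.scevcheck
; block in the CHECK lines) before entering the vector loop.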
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@a = common local_unnamed_addr global [192 x [192 x i32]] zeroinitializer, align 16
define i32 @main(ptr %ptr) {
; CHECK-LABEL: @main(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[S:%.*]] = alloca i16, align 2
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I]])
; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[S]])
; CHECK-NEXT:    [[CALL:%.*]] = call i32 (ptr, ...) @goo(ptr nonnull [[I]])
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[I]], align 4
; CHECK-NEXT:    [[STOREMERGE6:%.*]] = trunc i32 [[TMP0]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE6]], ptr [[S]], align 2
; CHECK-NEXT:    [[CONV17:%.*]] = and i32 [[TMP0]], 65472
; CHECK-NEXT:    [[CMP8:%.*]] = icmp eq i32 [[CONV17]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END12:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[STOREMERGE_IN9:%.*]] = phi i32 [ [[TMP0]], [[FOR_BODY_LR_PH]] ], [ [[ADD:%.*]], [[FOR_INC9:%.*]] ]
; CHECK-NEXT:    [[CONV52:%.*]] = and i32 [[STOREMERGE_IN9]], 255
; CHECK-NEXT:    [[CMP63:%.*]] = icmp ult i32 [[TMP0]], [[CONV52]]
; CHECK-NEXT:    br i1 [[CMP63]], label [[FOR_BODY8_LR_PH:%.*]], label [[FOR_INC9]]
; CHECK:       for.body8.lr.ph:
; CHECK-NEXT:    [[CONV3:%.*]] = trunc i32 [[STOREMERGE_IN9]] to i8
; CHECK-NEXT:    [[DOTPROMOTED:%.*]] = load i32, ptr @a, align 16
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], 1
; CHECK-NEXT:    [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP3]], [[UMIN1]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP4]], 32
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[TMP5:%.*]] = add i8 [[CONV3]], -1
; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP6]])
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 [[TMP6]], [[UMIN]]
; CHECK-NEXT:    [[TMP8:%.*]] = trunc i32 [[TMP7]] to i8
; CHECK-NEXT:    [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP8]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sub i8 [[TMP5]], [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP10:%.*]] = icmp ugt i8 [[TMP9]], [[TMP5]]
; CHECK-NEXT:    [[TMP11:%.*]] = or i1 [[TMP10]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ugt i32 [[TMP7]], 255
; CHECK-NEXT:    [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[DOTPROMOTED]], 1
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP7]]
; CHECK-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], [[TMP14]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP13]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 8
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i32 [[DOTPROMOTED]], [[N_VEC]]
; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
; CHECK-NEXT:    [[IND_END2:%.*]] = sub i8 [[CONV3]], [[DOTCAST]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i32 [[DOTPROMOTED]], [[INDEX]]
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[OFFSET_IDX]], 4
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP18]], 1
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP19]], 1
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[TMP20]]
; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[TMP21]]
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 0
; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 4
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP24]], align 4
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP25]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT:    [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP4]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[DOTPROMOTED]], [[FOR_BODY8_LR_PH]] ], [ [[DOTPROMOTED]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i8 [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ [[CONV3]], [[FOR_BODY8_LR_PH]] ], [ [[CONV3]], [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY8:%.*]]
; CHECK:       for.body8:
; CHECK-NEXT:    [[INC5:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[C_04:%.*]] = phi i8 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[DEC:%.*]], [[FOR_BODY8]] ]
; CHECK-NEXT:    [[INC]] = add i32 [[INC5]], 1
; CHECK-NEXT:    [[DEC]] = add i8 [[C_04]], -1
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[INC]]
; CHECK-NEXT:    store i32 0, ptr [[GEP]], align 4
; CHECK-NEXT:    [[CONV5:%.*]] = zext i8 [[DEC]] to i32
; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult i32 [[TMP0]], [[CONV5]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[FOR_BODY8]], label [[FOR_COND4_FOR_INC9_CRIT_EDGE]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.cond4.for.inc9_crit_edge:
; CHECK-NEXT:    [[INC_LCSSA:%.*]] = phi i32 [ [[INC]], [[FOR_BODY8]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    store i32 [[INC_LCSSA]], ptr @a, align 16
; CHECK-NEXT:    br label [[FOR_INC9]]
; CHECK:       for.inc9:
; CHECK-NEXT:    [[CONV10:%.*]] = and i32 [[STOREMERGE_IN9]], 65535
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[CONV10]], 1
; CHECK-NEXT:    [[CONV1:%.*]] = and i32 [[ADD]], 65472
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CONV1]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END12_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end12_crit_edge:
; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_INC9]] ]
; CHECK-NEXT:    [[STOREMERGE:%.*]] = trunc i32 [[ADD_LCSSA]] to i16
; CHECK-NEXT:    store i16 [[STOREMERGE]], ptr [[S]], align 2
; CHECK-NEXT:    br label [[FOR_END12]]
; CHECK:       for.end12:
; CHECK-NEXT:    [[CALL13:%.*]] = call i32 (ptr, ...) @foo(ptr nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[S]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[I]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %i = alloca i32, align 4
  %s = alloca i16, align 2
  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) #3
  store i32 0, ptr %i, align 4
  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %s) #3
  %call = call i32 (ptr, ...) @goo(ptr nonnull %i) #3
  %0 = load i32, ptr %i, align 4
  %storemerge6 = trunc i32 %0 to i16
  store i16 %storemerge6, ptr %s, align 2
  %conv17 = and i32 %0, 65472
  %cmp8 = icmp eq i32 %conv17, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end12
for.body.lr.ph:                                   ; preds = %entry
  br label %for.body
for.body:                                         ; preds = %for.body.lr.ph, %for.inc9
  %storemerge.in9 = phi i32 [ %0, %for.body.lr.ph ], [ %add, %for.inc9 ]
  %conv52 = and i32 %storemerge.in9, 255
  %cmp63 = icmp ult i32 %0, %conv52
  br i1 %cmp63, label %for.body8.lr.ph, label %for.inc9
for.body8.lr.ph:                                  ; preds = %for.body
  %conv3 = trunc i32 %storemerge.in9 to i8
  %.promoted = load i32, ptr @a, align 16
  br label %for.body8
for.body8:                                        ; preds = %for.body8.lr.ph, %for.body8
  %inc5 = phi i32 [ %.promoted, %for.body8.lr.ph ], [ %inc, %for.body8 ]
  %c.04 = phi i8 [ %conv3, %for.body8.lr.ph ], [ %dec, %for.body8 ]
  %inc = add i32 %inc5, 1
  %dec = add i8 %c.04, -1
  %gep = getelementptr inbounds i32, ptr %ptr, i32 %inc
  store i32 0, ptr %gep
  %conv5 = zext i8 %dec to i32
  %cmp6 = icmp ult i32 %0, %conv5
  br i1 %cmp6, label %for.body8, label %for.cond4.for.inc9_crit_edge
for.cond4.for.inc9_crit_edge:                     ; preds = %for.body8
  %inc.lcssa = phi i32 [ %inc, %for.body8 ]
  store i32 %inc.lcssa, ptr @a, align 16
  br label %for.inc9
for.inc9:                                         ; preds = %for.cond4.for.inc9_crit_edge, %for.body
  %conv10 = and i32 %storemerge.in9, 65535
  %add = add nuw nsw i32 %conv10, 1
  %conv1 = and i32 %add, 65472
  %cmp = icmp eq i32 %conv1, 0
  br i1 %cmp, label %for.body, label %for.cond.for.end12_crit_edge
for.cond.for.end12_crit_edge:                     ; preds = %for.inc9
  %add.lcssa = phi i32 [ %add, %for.inc9 ]
  %storemerge = trunc i32 %add.lcssa to i16
  store i16 %storemerge, ptr %s, align 2
  br label %for.end12
for.end12:                                        ; preds = %for.cond.for.end12_crit_edge, %entry
  %call13 = call i32 (ptr, ...) @foo(ptr nonnull %s) #3
  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %s) #3
  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i) #3
  ret i32 0
}
; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
declare i32 @goo(...) local_unnamed_addr #2
declare i32 @foo(...) local_unnamed_addr #2
; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1