# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
# Test that a loop containing a VPNOT cannot be tail-predicated.
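# A VPNOT produces the inverse of the VCTP-generated mask, so instructions
# predicated on it are not guarded by the tail predicate itself. Removing the
# VCTP (as tail-predication would) could therefore change which lanes are
# written, so the pass must keep the explicit VCTP/VPST sequence, as the
# checks below show.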
--- |
  define dso_local void @inloop_vpnot(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture readonly %c, ptr nocapture readonly %d, ptr nocapture %e, i32 %N) local_unnamed_addr #0 {
  entry:
    %cmp9 = icmp eq i32 %N, 0
    %tmp = add i32 %N, 3
    %tmp1 = lshr i32 %tmp, 2
    %tmp2 = shl nuw i32 %tmp1, 2
    %tmp3 = add i32 %tmp2, -4
    %tmp4 = lshr i32 %tmp3, 2
    %tmp5 = add nuw nsw i32 %tmp4, 1
    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
  vector.ph:                                        ; preds = %entry
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)
    br label %vector.body
  vector.body:                                      ; preds = %vector.body, %vector.ph
    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
    %lsr.iv.e = phi ptr [ %scevgep.e, %vector.body ], [ %e, %vector.ph ]
    %lsr.iv.d = phi ptr [ %scevgep.d, %vector.body ], [ %d, %vector.ph ]
    %lsr.iv.c = phi ptr [ %scevgep.c, %vector.body ], [ %c, %vector.ph ]
    %lsr.iv18 = phi ptr [ %scevgep19, %vector.body ], [ %b, %vector.ph ]
    %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %a, %vector.ph ]
    %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp14, %vector.body ]
    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
    %lsr.iv17 = bitcast ptr %lsr.iv to ptr
    %lsr.iv1820 = bitcast ptr %lsr.iv18 to ptr
    %lsr.iv1820.c = bitcast ptr %lsr.iv.c to ptr
    %lsr.iv17.d = bitcast ptr %lsr.iv.d to ptr
    %lsr.cast.e = bitcast ptr %lsr.iv.e to ptr
    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
    %tmp9 = sub i32 %tmp7, 4
    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
    %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv1820, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
    %tmp11 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
    %wide.masked.load.c = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv1820.c, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
    %sext.load.c = sext <4 x i16> %wide.masked.load.c to <4 x i32>
    %wide.masked.load.d = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv17.d, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
    %sext.load.d = sext <4 x i16> %wide.masked.load.d to <4 x i32>
    %tmp12 = mul nsw <4 x i32> %tmp11, %tmp10
    %mul.2 = mul nsw <4 x i32> %sext.load.c, %sext.load.d
    %tmp13 = add <4 x i32> %tmp12, %mul.2
    %tmp14 = add <4 x i32> %tmp13, %vec.phi
    %vpnot = xor <4 x i1> %tmp8, <i1 true, i1 true, i1 true, i1 true>
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp14, ptr %lsr.cast.e, i32 4, <4 x i1> %vpnot)
    %scevgep = getelementptr i16, ptr %lsr.iv, i32 4
    %scevgep19 = getelementptr i16, ptr %lsr.iv18, i32 4
    %scevgep.c = getelementptr i16, ptr %lsr.iv.c, i32 4
    %scevgep.d = getelementptr i16, ptr %lsr.iv.d, i32 4
    %scevgep.e = getelementptr i32, ptr %lsr.iv.e, i32 4
    %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
    %tmp16 = icmp ne i32 %tmp15, 0
    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
    br i1 %tmp16, label %vector.body, label %for.cond.cleanup
  for.cond.cleanup:                                 ; preds = %vector.body, %entry
    ret void
  }
  declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>) #1
  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #2
  declare i32 @llvm.start.loop.iterations.i32(i32) #3
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
...
---
name:            inloop_vpnot
alignment:       2
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
failedISel:      false
tracksRegLiveness: true
hasWinCFI:       false
registers:       []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap:     false
  hasPatchPoint:   false
  stackSize:       16
  offsetAdjustment: 0
  maxAlignment:    4
  adjustsStack:    false
  hasCalls:        false
  stackProtector:  ''
  maxCallFrameSize: 0
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasVAStart:      false
  hasMustTailInVarArgFunc: false
  localFrameSize:  0
  savePoint:       ''
  restorePoint:    ''
fixedStack:
  - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, type: default, offset: 0, size: 4, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites:       []
constants:       []
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: inloop_vpnot
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.3(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r5, -12
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r4, -16
  ; CHECK-NEXT:   renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
  ; CHECK-NEXT:   t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3, $r12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
  ; CHECK-NEXT:   renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT:   liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
  ; CHECK-NEXT:   MVE_VPST 4, implicit $vpr
  ; CHECK-NEXT:   renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
  ; CHECK-NEXT:   renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
  ; CHECK-NEXT:   renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT:   MVE_VPST 4, implicit $vpr
  ; CHECK-NEXT:   renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK-NEXT:   renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
  ; CHECK-NEXT:   renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
  ; CHECK-NEXT:   $lr = tMOVr $r4, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT:   renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
  ; CHECK-NEXT:   MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT:   renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
  ; CHECK-NEXT:   dead $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.for.cond.cleanup:
  ; CHECK-NEXT:   tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
  bb.0.entry:
    successors: %bb.3(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
    frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 16
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    frame-setup CFI_INSTRUCTION offset $r5, -12
    frame-setup CFI_INSTRUCTION offset $r4, -16
    renamable $r12 = t2LDRi12 $sp, 20, 14, $noreg :: (load (s32) from %fixed-stack.0)
    t2CMPri renamable $r12, 0, 14, $noreg, implicit-def $cpsr
    tBcc %bb.3, 0, killed $cpsr
  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r12
    renamable $lr = t2ADDri renamable $r12, 3, 14, $noreg, $noreg
    renamable $r4, dead $cpsr = tMOVi8 1, 14, $noreg
    renamable $lr = t2BICri killed renamable $lr, 3, 14, $noreg, $noreg
    renamable $r5 = tLDRspi $sp, 4, 14, $noreg :: (load (s32) from %fixed-stack.1, align 8)
    renamable $lr = t2SUBri killed renamable $lr, 4, 14, $noreg, $noreg
    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
    renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14, $noreg, $noreg
    $lr = t2DoLoopStart renamable $lr
    $r4 = tMOVr killed $lr, 14, $noreg
  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
    renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
    MVE_VPST 4, implicit $vpr
    renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
    renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
    renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
    MVE_VPST 4, implicit $vpr
    renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
    renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
    renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
    $lr = tMOVr $r4, 14, $noreg
    renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
    renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14, $noreg
    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
    renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg
  bb.3.for.cond.cleanup:
    tPOP_RET 14, $noreg, def $r4, def $r5, def $r7, def $pc
...