# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+lob -run-pass=arm-mve-vpt-opts --verify-machineinstrs %s -o - | FileCheck %s
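#
# Check that the low-overhead-loop pseudos (t2DoLoopStart / t2LoopDec /
# t2LoopEnd) are reverted by arm-mve-vpt-opts to a plain mov / subs /
# cmp-and-branch loop, presumably because the loop body contains a call
# (tBL to &__aeabi_memclr4) that clobbers LR.
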
--- |
  @d = local_unnamed_addr global i32 0, align 4
  @c = local_unnamed_addr global [1 x i32] zeroinitializer, align 4

  define i32 @e() optsize {
  entry:
    %.pr = load i32, ptr @d, align 4
    %cmp13 = icmp sgt i32 %.pr, -1
    br i1 %cmp13, label %for.cond1.preheader.preheader, label %for.end9

  for.cond1.preheader.preheader:                    ; preds = %entry
    %0 = add i32 %.pr, 1
    %1 = call i32 @llvm.start.loop.iterations.i32(i32 %0)
    br label %for.cond1.preheader

  for.cond1.preheader:                              ; preds = %for.cond1.preheader.preheader, %for.cond1.preheader
    %2 = phi i32 [ %1, %for.cond1.preheader.preheader ], [ %3, %for.cond1.preheader ]
    call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(24) @c, i8 0, i32 24, i1 false)
    %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %2, i32 1)
    %4 = icmp ne i32 %3, 0
    br i1 %4, label %for.cond1.preheader, label %for.cond.for.end9_crit_edge

  for.cond.for.end9_crit_edge:                      ; preds = %for.cond1.preheader
    store i32 -1, ptr @d, align 4
    br label %for.end9

  for.end9:                                         ; preds = %for.cond.for.end9_crit_edge, %entry
    ret i32 undef
  }

  declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
...
---
name: e
alignment: 2
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
hasWinCFI: false
registers:
  - { id: 0, class: gprnopc, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gprlr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: rgpr, preferred-register: '' }
  - { id: 5, class: rgpr, preferred-register: '' }
  - { id: 6, class: gprlr, preferred-register: '' }
  - { id: 7, class: rgpr, preferred-register: '' }
  - { id: 8, class: rgpr, preferred-register: '' }
  - { id: 9, class: gprlr, preferred-register: '' }
  - { id: 10, class: gprlr, preferred-register: '' }
  - { id: 11, class: rgpr, preferred-register: '' }
  - { id: 12, class: rgpr, preferred-register: '' }
  - { id: 13, class: gpr, preferred-register: '' }
liveins: []
body: |
  ; CHECK-LABEL: name: e
  ; CHECK: bb.0.entry:
  ; CHECK:   successors: %bb.1(0x50000000), %bb.4(0x30000000)
  ; CHECK:   [[t2MOVi32imm:%[0-9]+]]:rgpr = t2MOVi32imm @d
  ; CHECK:   [[t2LDRi12_:%[0-9]+]]:gprnopc = t2LDRi12 [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
  ; CHECK:   t2CMPri [[t2LDRi12_]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK:   t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
  ; CHECK:   t2B %bb.1, 14 /* CC::al */, $noreg
  ; CHECK: bb.1.for.cond1.preheader.preheader:
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[t2LDRi12_]], 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK:   [[tMOVr:%[0-9]+]]:gprlr = tMOVr killed [[t2ADDri]], 14 /* CC::al */, $noreg
  ; CHECK:   [[COPY:%[0-9]+]]:gpr = COPY [[tMOVr]]
  ; CHECK:   [[t2MOVi32imm1:%[0-9]+]]:rgpr = t2MOVi32imm @c
  ; CHECK:   [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: bb.2.for.cond1.preheader:
  ; CHECK:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK:   [[PHI:%[0-9]+]]:gprlr = PHI [[COPY]], %bb.1, %3, %bb.2
  ; CHECK:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
  ; CHECK:   $r0 = COPY [[t2MOVi32imm1]]
  ; CHECK:   $r1 = COPY [[t2MOVi]]
  ; CHECK:   tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
  ; CHECK:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
  ; CHECK:   [[t2SUBri:%[0-9]+]]:gprlr = t2SUBri [[PHI]], 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK:   [[COPY1:%[0-9]+]]:gpr = COPY [[t2SUBri]]
  ; CHECK:   t2CMPri [[t2SUBri]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK:   t2Bcc %bb.2, 1 /* CC::ne */, $cpsr
  ; CHECK:   t2B %bb.3, 14 /* CC::al */, $noreg
  ; CHECK: bb.3.for.cond.for.end9_crit_edge:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK:   t2STRi12 killed [[t2MOVi1]], [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)
  ; CHECK: bb.4.for.end9:
  ; CHECK:   [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
  ; CHECK:   $r0 = COPY [[DEF]]
  ; CHECK:   tBX_RET 14 /* CC::al */, $noreg, implicit $r0
  bb.0.entry:
    successors: %bb.1(0x50000000), %bb.4(0x30000000)

    %4:rgpr = t2MOVi32imm @d
    %0:gprnopc = t2LDRi12 %4, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
    t2CMPri %0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
    t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
    t2B %bb.1, 14 /* CC::al */, $noreg

  bb.1.for.cond1.preheader.preheader:
    successors: %bb.2(0x80000000)

    %5:rgpr = t2ADDri %0, 1, 14 /* CC::al */, $noreg, $noreg
    %6:gprlr = t2DoLoopStart killed %5
    %1:gpr = COPY %6
    %7:rgpr = t2MOVi32imm @c
    %8:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg

  bb.2.for.cond1.preheader:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)

    %2:gprlr = PHI %1, %bb.1, %3, %bb.2
    ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
    $r0 = COPY %7
    $r1 = COPY %8
    tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
    ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
    %9:gprlr = t2LoopDec %2, 1
    %3:gpr = COPY %9
    t2LoopEnd %9, %bb.2, implicit-def dead $cpsr
    t2B %bb.3, 14 /* CC::al */, $noreg

  bb.3.for.cond.for.end9_crit_edge:
    successors: %bb.4(0x80000000)

    %12:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
    t2STRi12 killed %12, %4, 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)

  bb.4.for.end9:
    %13:gpr = IMPLICIT_DEF
    $r0 = COPY %13
    tBX_RET 14 /* CC::al */, $noreg, implicit $r0
...