# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s --verify-machineinstrs -o - | FileCheck %s
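# The loop below keeps its accumulator ($q0) in VPR-predicated MVE_VADDi16
# instructions, and that value is live out into middle.block. The checks expect
# the arm-low-overhead-loops pass to still form a tail-predicated loop:
# t2WhileLoopStartLR/t2LoopDec/t2LoopEnd become MVE_WLSTP_16/MVE_LETP, the
# explicit MVE_VCTP16 and MVE_VPST are removed, and the predicated loads and
# adds are rewritten with their predicate operands cleared.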
--- |
  define i16 @predicated_livout(i8* %input_1_vect, i8* %input_2_vect, i32 %block_size) #0 {
  entry:
    %rnd.up = add i32 %block_size, 7
    %div = lshr i32 %rnd.up, 3
    %0 = call i1 @llvm.test.set.loop.iterations.i32(i32 %div)
    br i1 %0, label %for.body.preheader, label %for.cond.cleanup

  for.body.preheader:                               ; preds = %entry
    br label %for.body

  for.body:                                         ; preds = %for.body.preheader, %for.body
    %lsr.iv = phi i32 [ 0, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
    %input_1_vect.addr.052 = phi i8* [ %add.ptr, %for.body ], [ %input_1_vect, %for.body.preheader ]
    %input_2_vect.addr.051 = phi i8* [ %add.ptr14, %for.body ], [ %input_2_vect, %for.body.preheader ]
    %num_elements.049 = phi i32 [ %sub, %for.body ], [ %block_size, %for.body.preheader ]
    %acc = phi <8 x i16> [ %acc.next, %for.body ], [ zeroinitializer, %for.body.preheader ]
    %input_2_cast = bitcast i8* %input_2_vect.addr.051 to <8 x i8>*
    %input_1_cast = bitcast i8* %input_1_vect.addr.052 to <8 x i8>*
    %pred = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %num_elements.049)
    %load.1 = tail call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %input_1_cast, i32 1, <8 x i1> %pred, <8 x i8> undef)
    %zext.load.1 = zext <8 x i8> %load.1 to <8 x i16>
    %load.2 = tail call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %input_2_cast, i32 1, <8 x i1> %pred, <8 x i8> undef)
    %zext.load.2 = zext <8 x i8> %load.2 to <8 x i16>
    %add = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %zext.load.1, <8 x i16> %zext.load.2, <8 x i1> %pred, <8 x i16> undef)
    %acc.next = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %add, <8 x i16> %acc, <8 x i1> %pred, <8 x i16> undef)
    %add.ptr = getelementptr inbounds i8, i8* %input_1_vect.addr.052, i32 8
    %add.ptr14 = getelementptr inbounds i8, i8* %input_2_vect.addr.051, i32 8
    %sub = add i32 %num_elements.049, -8
    %iv.next = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
    %cmp = icmp ne i32 %iv.next, 0
    %lsr.iv.next = add i32 %lsr.iv, -1
    br i1 %cmp, label %for.body, label %middle.block

  middle.block:                                     ; preds = %for.body
    %reduce = tail call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc.next)
    ret i16 %reduce

  for.cond.cleanup:                                 ; preds = %entry
    ret i16 0
  }

  declare <8 x i1> @llvm.arm.mve.vctp16(i32) #1
  declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>) #2
  declare i1 @llvm.test.set.loop.iterations.i32(i32) #3
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
  declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) #4
  declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #1
...
---
name: predicated_livout
alignment: 2
tracksRegLiveness: true
registers: []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
frameInfo:
  stackSize: 8
  offsetAdjustment: 0
  maxAlignment: 4
fixedStack: []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants: []
machineFunctionInfo: {}
body: |
  ; CHECK-LABEL: name: predicated_livout
  ; CHECK: bb.0.entry:
  ; CHECK:   successors: %bb.1(0x40000000), %bb.4(0x40000000)
  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
  ; CHECK:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK:   $lr = MVE_WLSTP_16 killed renamable $r2, %bb.4
  ; CHECK: bb.1.for.body.preheader:
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $lr, $r0, $r1
  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
  ; CHECK: bb.2.for.body:
  ; CHECK:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r3
  ; CHECK:   renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
  ; CHECK:   renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_2_cast, align 1)
  ; CHECK:   renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_1_cast, align 1)
  ; CHECK:   renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK:   renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, killed $noreg, $noreg, undef renamable $q0
  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.2
  ; CHECK: bb.3.middle.block:
  ; CHECK:   liveins: $q0
  ; CHECK:   renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
  ; CHECK:   tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
  ; CHECK: bb.4.for.cond.cleanup:
  ; CHECK:   liveins: $lr
  ; CHECK:   $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
  ; CHECK:   tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
  bb.0.entry:
    successors: %bb.1(0x40000000), %bb.4(0x40000000)
    liveins: $r0, $r1, $r2, $lr, $r7

    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14, $noreg
    renamable $lr = t2LSRri killed renamable $r3, 3, 14, $noreg, $noreg
    $lr = t2WhileLoopStartLR renamable $lr, %bb.4, implicit-def dead $cpsr
    tB %bb.1, 14, $noreg

  bb.1.for.body.preheader:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $lr

    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
    renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg

  bb.2.for.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $r0, $r1, $r2, $r3, $lr

    renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
    renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 1, implicit $vpr
    renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.input_2_cast, align 1)
    renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.input_1_cast, align 1)
    renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 1, renamable $vpr, $noreg, undef renamable $q1
    renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 1, killed renamable $vpr, $noreg, undef renamable $q0
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg

  bb.3.middle.block:
    liveins: $q0

    renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
    tPOP_RET 14, $noreg, def $r7, def $pc, implicit killed $r0

  bb.4.for.cond.cleanup:
    liveins: $lr

    $r0, dead $cpsr = tMOVi8 0, 14, $noreg
    tBX_RET 14, $noreg, implicit killed $r0
...