; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+lsl-fast | FileCheck %s
%struct.a = type [256 x i16]
%struct.b = type [256 x i32]
%struct.c = type [256 x i64]
declare void @foo()
; i16 element: check that the scaled index (ubfx of bits [9,16]) is folded
; into the load/store addressing mode as "lsl #1" and that the index is kept
; in a callee-saved register (x21) so it survives the call to @foo, rather
; than being recomputed for the store.
define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: halfword:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: ubfx x21, x1, #9, #8
; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
; CHECK-NEXT: ldrh w20, [x0, x21, lsl #1]
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: strh w20, [x19, x21, lsl #1]
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
; Index = (%xor72 >> 9) & 255 — an 8-bit field at bit 9, which the backend
; matches to a single ubfx.
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
%result = load i16, i16* %arrayidx86, align 2
; The call clobbers caller-saved registers, forcing the base, index, and
; loaded value into callee-saved registers for the store below.
call void @foo()
store i16 %result, i16* %arrayidx86, align 2
ret i16 %result
}
; i32 element: same pattern as @halfword, but the addressing-mode shift must
; scale by 4 ("lsl #2") for the 32-bit load/store pair around the call.
define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: word:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: ubfx x21, x1, #9, #8
; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
; CHECK-NEXT: ldr w20, [x0, x21, lsl #2]
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: str w20, [x19, x21, lsl #2]
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
; Index = (%xor72 >> 9) & 255, matched to a single ubfx as in @halfword.
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
%result = load i32, i32* %arrayidx86, align 4
; Value and address components must survive the call in callee-saved regs.
call void @foo()
store i32 %result, i32* %arrayidx86, align 4
ret i32 %result
}
; i64 element: same pattern again with an 8-byte scale ("lsl #3") folded into
; both the ldr and the post-call str addressing modes.
define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
; CHECK-LABEL: doubleword:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: ubfx x21, x1, #9, #8
; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov x19, x0
; CHECK-NEXT: ldr x20, [x0, x21, lsl #3]
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x0, x20
; CHECK-NEXT: str x20, [x19, x21, lsl #3]
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
; Index = (%xor72 >> 9) & 255, matched to a single ubfx as in @halfword.
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
%result = load i64, i64* %arrayidx86, align 8
; Value and address components must survive the call in callee-saved regs.
call void @foo()
store i64 %result, i64* %arrayidx86, align 8
ret i64 %result
}
; Shifts with multiple non-memory uses: %mul1/%mul2 feed two compares and are
; also returned, so the shifts are materialized once into registers
; (lsl x8/x9) instead of being folded into each user. With +lsl-fast the
; shifted operands could otherwise be folded into the cmp; the checks pin the
; expected materialize-once behavior.
define i64 @multi_use_non_memory(i64 %a, i64 %b) {
; CHECK-LABEL: multi_use_non_memory:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: lsl x8, x0, #3
; CHECK-NEXT: lsl x9, x1, #3
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: b.lt .LBB3_2
; CHECK-NEXT: // %bb.1: // %falsebb
; CHECK-NEXT: csel x0, x8, x9, gt
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB3_2: // %truebb
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl foo
entry:
  %mul1 = shl i64 %a, 3
  %mul2 = shl i64 %b, 3
  %cmp = icmp slt i64 %mul1, %mul2
  br i1 %cmp, label %truebb, label %falsebb
; truebb never returns, so the generated code only sets up a frame on that
; cold path (checked above after .LBB3_2).
truebb:
  tail call void @foo()
  unreachable
; falsebb selects between the two shifted values; the sgt compare reuses the
; flags/values already in x8/x9, emitted as a single csel.
falsebb:
  %cmp2 = icmp sgt i64 %mul1, %mul2
  br i1 %cmp2, label %exitbb, label %endbb
exitbb:
  ret i64 %mul1
endbb:
  ret i64 %mul2
}