# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=aarch64 -run-pass=legalizer -global-isel -o - %s | FileCheck %s
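#
# Widening the <3 x s16> loads below to <4 x s16> reads a fourth lane whose
# value is not covered by the !range metadata, so keeping !range on the
# widened G_LOAD would be unsound; the legalizer is expected to drop it.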
--- |
  target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
  target triple = "aarch64"

  define <3 x i16> @range_v3i16(ptr %a_ptr, ptr %b_ptr) {
    %a = load <3 x i16>, ptr %a_ptr, align 8, !range !0, !noundef !1
    %b = load <3 x i16>, ptr %b_ptr, align 8, !range !2, !noundef !1
    %result = add <3 x i16> %a, %b
    ret <3 x i16> %result
  }

  !0 = !{i16 16, i16 17}
  !1 = !{}
  !2 = !{i16 32, i16 33}
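
  ; The ranges are half-open: !0 pins every loaded element of %a to exactly
  ; 16 and !2 pins every element of %b to exactly 32.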
...
---
name: range_v3i16
body: |
  bb.1 (%ir-block.0):
    liveins: $x0, $x1

    ; Make sure we drop the range metadata when widening an aligned load.
    ; CHECK-LABEL: name: range_v3i16
    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<4 x s16>) from %ir.a_ptr)
    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<4 x s16>) from %ir.b_ptr)
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<4 x s16>) = G_ADD [[LOAD]], [[LOAD1]]
    ; CHECK-NEXT: $d0 = COPY [[ADD]](<4 x s16>)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
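    ; Note that the widened (<4 x s16>) MMOs in the CHECK lines above carry
    ; no !range, unlike the input loads below.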
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(<3 x s16>) = G_LOAD %0(p0) :: (load (<3 x s16>) from %ir.a_ptr, align 8, !range !0)
    %3:_(<3 x s16>) = G_LOAD %1(p0) :: (load (<3 x s16>) from %ir.b_ptr, align 8, !range !2)
    %4:_(<3 x s16>) = G_ADD %2, %3
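    ; Pad the <3 x s16> sum out to the four s16 lanes of $d0 with an undef
    ; element.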
    %5:_(s16), %6:_(s16), %7:_(s16) = G_UNMERGE_VALUES %4(<3 x s16>)
    %8:_(s16) = G_IMPLICIT_DEF
    %9:_(<4 x s16>) = G_BUILD_VECTOR %5(s16), %6(s16), %7(s16), %8(s16)
    $d0 = COPY %9(<4 x s16>)
    RET_ReallyLR implicit $d0
...