# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s
#
# Verify register banks for intrinsics with known constraints. (E.g. all
# operands must be FPRs.)
#
...
---
# uaddlv only exists on the FP/SIMD unit, so regbankselect must put both the
# vector operand and the scalar result on the FPR bank (see the fpr(...)
# annotations in the CHECK lines below), even though the result is ultimately
# copied into the GPR $w0.
name: uaddlv_fpr
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0
    ; CHECK-LABEL: name: uaddlv_fpr
    ; CHECK: liveins: $q0
    ; CHECK: %copy:fpr(<16 x s8>) = COPY $q0
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    ; CHECK: $w0 = COPY %intrin(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:_(<16 x s8>) = COPY $q0
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
---
# When the intrinsic's vector operand comes from a load, the load's result is
# assigned to the FPR bank directly (no cross-bank copy needed): the CHECK
# lines show %load:fpr feeding the fpr intrinsic, while the pointer stays on
# the GPR bank.
name: uaddlv_fpr_load
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: uaddlv_fpr_load
    ; CHECK: liveins: $x0
    ; CHECK: %ptr:gpr(p0) = COPY $x0
    ; CHECK: %load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
    ; CHECK: $w0 = COPY %intrin(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %ptr:_(p0) = COPY $x0
    %load:_(<2 x s32>) = G_LOAD %ptr :: (load (<2 x s32>))
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
---
# Here the vector operand %copy is assigned to the GPR bank (it is copied from
# the general-purpose register $x0), so regbankselect must insert a cross-bank
# gpr->fpr COPY (the [[COPY]]:fpr in the CHECK lines) before the intrinsic can
# consume it; the scalar result is then stored through the gpr pointer.
name: uaddlv_fpr_store
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1
    ; CHECK-LABEL: name: uaddlv_fpr_store
    ; CHECK: liveins: $x0, $x1
    ; CHECK: %copy:gpr(<2 x s32>) = COPY $x0
    ; CHECK: %ptr:gpr(p0) = COPY $x0
    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY %copy(<2 x s32>)
    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[COPY]](<2 x s32>)
    ; CHECK: G_STORE %intrin(s32), %ptr(p0) :: (store (s32))
    %copy:_(<2 x s32>) = COPY $x0
    %ptr:_(p0) = COPY $x0
    %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<2 x s32>)
    G_STORE %intrin, %ptr :: (store (s32))
...