; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2-aes < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sve-aes < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+ssve-aes < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme,+ssve-aes -force-streaming < %s | FileCheck %s
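;
; The RUN lines cover each feature combination that enables these
; instructions: the combined +sve2-aes feature, +sve-aes and +ssve-aes
; on top of plain SVE, and +ssve-aes under SME with -force-streaming to
; verify codegen in streaming SVE mode.
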
;
; AESD
;
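; AESD performs a single round of AES decryption (an EOR with the round
; key followed by the inverse ShiftRows and SubBytes steps) on each
; 128-bit segment of the vector. The instruction is destructive: the
; first source register must also be the destination.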
define <vscale x 16 x i8> @aesd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: aesd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aesd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aesd(<vscale x 16 x i8> %a,
                                                        <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
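
; Because the round-key EOR is the first step, aesd is commutative in
; its two operands; swapping them should still select the destructive
; two-operand form with no extra MOV.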
define <vscale x 16 x i8> @aesd_i8_commuted(<vscale x 16 x i8> %a,
; CHECK-LABEL: aesd_i8_commuted:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aesd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
                                            <vscale x 16 x i8> %b) {
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aesd(<vscale x 16 x i8> %b,
                                                        <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

;
; AESIMC
;
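; AESIMC applies the AES inverse MixColumns transformation to each
; 128-bit segment of the vector.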
define <vscale x 16 x i8> @aesimc_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: aesimc_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aesimc z0.b, z0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aesimc(<vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

;
; AESE
;
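; AESE performs a single round of AES encryption (an EOR with the round
; key followed by the ShiftRows and SubBytes steps) on each 128-bit
; segment of the vector.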
define <vscale x 16 x i8> @aese_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: aese_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aese z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aese(<vscale x 16 x i8> %a,
                                                        <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
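
; As with aesd, the leading round-key EOR makes aese commutative, so
; the commuted call should still use the destructive form directly.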
define <vscale x 16 x i8> @aese_i8_commuted(<vscale x 16 x i8> %a,
; CHECK-LABEL: aese_i8_commuted:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aese z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
                                            <vscale x 16 x i8> %b) {
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aese(<vscale x 16 x i8> %b,
                                                        <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

;
; AESMC
;
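; AESMC applies the AES MixColumns transformation to each 128-bit
; segment of the vector.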
define <vscale x 16 x i8> @aesmc_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: aesmc_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    aesmc z0.b, z0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.aesmc(<vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.aesd(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.aesimc(<vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.aese(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.aesmc(<vscale x 16 x i8>)