File: aes-intrinsics.ll

; RUN: opt -S -instcombine < %s | FileCheck %s
; ARM64 AES intrinsic variants
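; The fold exercised here relies on the fact that the AArch64 AESE/AESD
; instructions xor their two operands before the (inverse) S-box step, so
; aese(data ^ key, 0) is equivalent to aese(data, key) and the explicit xor
; can be folded into the intrinsic call. The *NonZero variants check that
; the fold does not fire when the second operand is not all zeroes.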

define <16 x i8> @combineXorAeseZeroARM64(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: @combineXorAeseZeroARM64(
; CHECK-NEXT:    %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
; CHECK-NEXT:    ret <16 x i8> %data.aes
  %data.xor = xor <16 x i8> %data, %key
  %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data.xor, <16 x i8> zeroinitializer)
  ret <16 x i8> %data.aes
}

define <16 x i8> @combineXorAeseNonZeroARM64(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: @combineXorAeseNonZeroARM64(
; CHECK-NEXT:    %data.xor = xor <16 x i8> %data, %key
; CHECK-NEXT:    %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data.xor, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT:    ret <16 x i8> %data.aes
  %data.xor = xor <16 x i8> %data, %key
  %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data.xor, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %data.aes
}

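; The same fold applies to the AES decryption intrinsic, since AESD likewise
; xors its operands before the inverse S-box step.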
define <16 x i8> @combineXorAesdZeroARM64(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: @combineXorAesdZeroARM64(
; CHECK-NEXT:    %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
; CHECK-NEXT:    ret <16 x i8> %data.aes
  %data.xor = xor <16 x i8> %data, %key
  %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data.xor, <16 x i8> zeroinitializer)
  ret <16 x i8> %data.aes
}

define <16 x i8> @combineXorAesdNonZeroARM64(<16 x i8> %data, <16 x i8> %key) {
; CHECK-LABEL: @combineXorAesdNonZeroARM64(
; CHECK-NEXT:    %data.xor = xor <16 x i8> %data, %key
; CHECK-NEXT:    %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data.xor, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
; CHECK-NEXT:    ret <16 x i8> %data.aes
  %data.xor = xor <16 x i8> %data, %key
  %data.aes = tail call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data.xor, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %data.aes
}

declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8>, <16 x i8>) #0
declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8>, <16 x i8>) #0