; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+amx-transpose,+amx-movrs | FileCheck %s --check-prefixes=CHECK,O0
; RUN: llc < %s -O2 -mtriple=x86_64-unknown-unknown -mattr=+amx-transpose,+amx-movrs | FileCheck %s --check-prefixes=CHECK,O2
; RUN: llc < %s -O2 -mtriple=x86_64-unknown-unknown -mattr=+amx-transpose,+amx-movrs,+egpr --show-mc-encoding | FileCheck %s --check-prefix=EGPR
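; Check that the AMX-MOVRS transpose load intrinsics select the t2rpntlvwz0rs,
; t2rpntlvwz0rst1, t2rpntlvwz1rs and t2rpntlvwz1rst1 instructions; the i8
; operand selects the destination tile pair (1 -> %tmm0, 2 -> %tmm2 below).
; The EGPR run also verifies the EVEX-to-VEX compressed encodings.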
define void @test_amx(i64 %stride, i8* %addr1) #0 {
; CHECK-LABEL: test_amx:
; CHECK: # %bb.0:
; CHECK-NEXT: t2rpntlvwz0rs (%rsi,%rdi), %tmm0
; CHECK-NEXT: t2rpntlvwz0rst1 (%rsi,%rdi), %tmm2
; CHECK-NEXT: t2rpntlvwz1rs (%rsi,%rdi), %tmm0
; CHECK-NEXT: t2rpntlvwz1rst1 (%rsi,%rdi), %tmm2
; CHECK-NEXT: retq
;
; EGPR-LABEL: test_amx:
; EGPR: # %bb.0:
; EGPR-NEXT: t2rpntlvwz0rs (%rsi,%rdi), %tmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x78,0xf8,0x04,0x3e]
; EGPR-NEXT: t2rpntlvwz0rst1 (%rsi,%rdi), %tmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x78,0xf9,0x14,0x3e]
; EGPR-NEXT: t2rpntlvwz1rs (%rsi,%rdi), %tmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x79,0xf8,0x04,0x3e]
; EGPR-NEXT: t2rpntlvwz1rst1 (%rsi,%rdi), %tmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x79,0xf9,0x14,0x3e]
; EGPR-NEXT: retq # encoding: [0xc3]
call void @llvm.x86.t2rpntlvwz0rs(i8 1, i8* %addr1, i64 %stride)
call void @llvm.x86.t2rpntlvwz0rst1(i8 2, i8* %addr1, i64 %stride)
call void @llvm.x86.t2rpntlvwz1rs(i8 1, i8* %addr1, i64 %stride)
call void @llvm.x86.t2rpntlvwz1rst1(i8 2, i8* %addr1, i64 %stride)
ret void
}
declare void @llvm.x86.t2rpntlvwz0rs(i8 , i8* , i64 )
declare void @llvm.x86.t2rpntlvwz0rst1(i8 , i8* , i64 )
declare void @llvm.x86.t2rpntlvwz1rs(i8 , i8* , i64 )
declare void @llvm.x86.t2rpntlvwz1rst1(i8 , i8* , i64 )
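; The *.internal variants take explicit row/column operands and return a tile
; pair, so a tile configuration must be materialized with ldtilecfg before the
; loads (once per instruction at -O0, once for the whole block at -O2).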
define void @test_amx2(i8* %base, i64 %stride) #0 {
; O0-LABEL: test_amx2:
; O0: # %bb.0:
; O0-NEXT: xorps %xmm0, %xmm0
; O0-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O0-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O0-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O0-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O0-NEXT: movb $1, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw $8, %ax
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: ldtilecfg -{{[0-9]+}}(%rsp)
; O0-NEXT: t2rpntlvwz0rs (%rdi,%rsi), %tmm4
; O0-NEXT: movw $8, %ax
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: ldtilecfg -{{[0-9]+}}(%rsp)
; O0-NEXT: t2rpntlvwz0rst1 (%rdi,%rsi), %tmm4
; O0-NEXT: movw $8, %ax
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: ldtilecfg -{{[0-9]+}}(%rsp)
; O0-NEXT: t2rpntlvwz1rs (%rdi,%rsi), %tmm4
; O0-NEXT: movw $8, %ax
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: # implicit-def: $al
; O0-NEXT: movb %al, -{{[0-9]+}}(%rsp)
; O0-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
; O0-NEXT: ldtilecfg -{{[0-9]+}}(%rsp)
; O0-NEXT: t2rpntlvwz1rst1 (%rdi,%rsi), %tmm4
; O0-NEXT: tilerelease
; O0-NEXT: retq
;
; O2-LABEL: test_amx2:
; O2: # %bb.0:
; O2-NEXT: xorps %xmm0, %xmm0
; O2-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O2-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O2-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O2-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
; O2-NEXT: movb $1, -{{[0-9]+}}(%rsp)
; O2-NEXT: movb $8, -{{[0-9]+}}(%rsp)
; O2-NEXT: movw $8, -{{[0-9]+}}(%rsp)
; O2-NEXT: movb $8, -{{[0-9]+}}(%rsp)
; O2-NEXT: movw $8, -{{[0-9]+}}(%rsp)
; O2-NEXT: ldtilecfg -{{[0-9]+}}(%rsp)
; O2-NEXT: movw $8, %ax
; O2-NEXT: t2rpntlvwz0rs (%rdi,%rsi), %tmm4
; O2-NEXT: t2rpntlvwz0rst1 (%rdi,%rsi), %tmm4
; O2-NEXT: t2rpntlvwz1rs (%rdi,%rsi), %tmm4
; O2-NEXT: t2rpntlvwz1rst1 (%rdi,%rsi), %tmm4
; O2-NEXT: tilerelease
; O2-NEXT: retq
;
; EGPR-LABEL: test_amx2:
; EGPR: # %bb.0:
; EGPR-NEXT: xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
; EGPR-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) # encoding: [0x0f,0x11,0x44,0x24,0xc0]
; EGPR-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) # encoding: [0x0f,0x11,0x44,0x24,0xd0]
; EGPR-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) # encoding: [0x0f,0x11,0x44,0x24,0xe0]
; EGPR-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp) # encoding: [0x0f,0x11,0x44,0x24,0xf0]
; EGPR-NEXT: movb $1, -{{[0-9]+}}(%rsp) # encoding: [0xc6,0x44,0x24,0xc0,0x01]
; EGPR-NEXT: movb $8, -{{[0-9]+}}(%rsp) # encoding: [0xc6,0x44,0x24,0xf4,0x08]
; EGPR-NEXT: movw $8, -{{[0-9]+}}(%rsp) # encoding: [0x66,0xc7,0x44,0x24,0xd8,0x08,0x00]
; EGPR-NEXT: movb $8, -{{[0-9]+}}(%rsp) # encoding: [0xc6,0x44,0x24,0xf5,0x08]
; EGPR-NEXT: movw $8, -{{[0-9]+}}(%rsp) # encoding: [0x66,0xc7,0x44,0x24,0xda,0x08,0x00]
; EGPR-NEXT: ldtilecfg -{{[0-9]+}}(%rsp) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x78,0x49,0x44,0x24,0xc0]
; EGPR-NEXT: movw $8, %ax # encoding: [0x66,0xb8,0x08,0x00]
; EGPR-NEXT: t2rpntlvwz0rs (%rdi,%rsi), %tmm4 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x78,0xf8,0x24,0x37]
; EGPR-NEXT: t2rpntlvwz0rst1 (%rdi,%rsi), %tmm4 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x78,0xf9,0x24,0x37]
; EGPR-NEXT: t2rpntlvwz1rs (%rdi,%rsi), %tmm4 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x79,0xf8,0x24,0x37]
; EGPR-NEXT: t2rpntlvwz1rst1 (%rdi,%rsi), %tmm4 # EVEX TO VEX Compression encoding: [0xc4,0xe5,0x79,0xf9,0x24,0x37]
; EGPR-NEXT: tilerelease # encoding: [0xc4,0xe2,0x78,0x49,0xc0]
; EGPR-NEXT: retq # encoding: [0xc3]
call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0rs.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0rst1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1rs.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1rst1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
ret void
}
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0rs.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0rst1.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1rs.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1rst1.internal(i16, i16, i16, i8*, i64)

attributes #0 = { nounwind }