File: avx512bf16-mov.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X86
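; Check that <8 x bfloat> and <16 x bfloat> loads and stores select the
; unaligned (vmovups) or aligned (vmovaps) XMM/YMM move depending on the
; alignment of the memory operand.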

define dso_local void @funbf16(ptr readonly %src, ptr writeonly %dst) {
; X64-LABEL: funbf16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vmovups (%rdi), %xmm0
; X64-NEXT:    vmovups %xmm0, (%rsi)
; X64-NEXT:    vmovaps (%rdi), %xmm0
; X64-NEXT:    vmovaps %xmm0, (%rsi)
; X64-NEXT:    vmovups (%rdi), %ymm0
; X64-NEXT:    vmovups %ymm0, (%rsi)
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
;
; X86-LABEL: funbf16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovups (%ecx), %xmm0
; X86-NEXT:    vmovups %xmm0, (%eax)
; X86-NEXT:    vmovaps (%ecx), %xmm0
; X86-NEXT:    vmovaps %xmm0, (%eax)
; X86-NEXT:    vmovups (%ecx), %ymm0
; X86-NEXT:    vmovups %ymm0, (%eax)
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
entry:
  %0 = load <8 x bfloat>, ptr %src, align 1
  store <8 x bfloat> %0, ptr %dst, align 1
  %1 = load <8 x bfloat>, ptr %src, align 32
  store <8 x bfloat> %1, ptr %dst, align 32
  %2 = load <16 x bfloat>, ptr %src, align 1
  store <16 x bfloat> %2, ptr %dst, align 1
  %3 = load <16 x bfloat>, ptr %src, align 32
  store <16 x bfloat> %3, ptr %dst, align 32
  ret void
}