File: stack-folding-lwp.ll

; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+lwp < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Stack reload folding tests.
;
; By including a nop call with sideeffects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.
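;
; For example, in the u32 test below the inline asm clobbers every general-purpose
; register except %rsp and returns its result in an XMM register ("=x"), so the
; incoming GPR arguments must be spilled to the stack across the "nop". When the
; fold succeeds, one argument's 4-byte reload is emitted as the memory operand of
; the lwpins/lwpval instruction itself (the "(%rsp) ... Folded Reload" CHECK
; pattern) rather than as a separate load followed by a register-only instruction.
; (Illustrative description only; exact registers and stack offsets are chosen by
; the register allocator.)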

define i8 @stack_fold_lwpins_u32(i32 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpins_u32
; CHECK:       # %bb.0:
; CHECK:       lwpins $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i8 @llvm.x86.lwpins32(i32 %a0, i32 %a1, i32 2814)
  ret i8 %2
}
declare i8 @llvm.x86.lwpins32(i32, i32, i32)

define i8 @stack_fold_lwpins_u64(i64 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpins_u64
; CHECK:       # %bb.0:
; CHECK:       lwpins $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i8 @llvm.x86.lwpins64(i64 %a0, i32 %a1, i32 2814)
  ret i8 %2
}
declare i8 @llvm.x86.lwpins64(i64, i32, i32)

define void @stack_fold_lwpval_u32(i32 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpval_u32
; CHECK:       # %bb.0:
; CHECK:       lwpval $2814, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  tail call void @llvm.x86.lwpval32(i32 %a0, i32 %a1, i32 2814)
  ret void
}
declare void @llvm.x86.lwpval32(i32, i32, i32)

define void @stack_fold_lwpval_u64(i64 %a0, i32 %a1) {
; CHECK-LABEL: stack_fold_lwpval_u64
; CHECK:       # %bb.0:
; CHECK:       lwpval $2814, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  tail call void @llvm.x86.lwpval64(i64 %a0, i32 %a1, i32 2814)
  ret void
}
declare void @llvm.x86.lwpval64(i64, i32, i32)