File: scalar-stack-align.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV32,RV32-ZVE64
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV64,RV64-ZVE64
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV32,RV32-V
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV64,RV64-V

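; A scalar alloca that requires 16-byte alignment (%c below) must keep that
; alignment when the frame also contains a scalable-vector alloca (%a).
; Reading the generated checks: with zve64x the minimum vlenb is 8 bytes,
; which is not a multiple of the 16-byte stack alignment, so the RVV area
; appears to be padded to 2*vlenb (the `slli a0, a0, 1` before each sp
; adjustment); with +v the minimum vlenb is 16 bytes, so subtracting a
; single vlenb already keeps sp 16-byte aligned.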
define ptr @scalar_stack_align16() nounwind {
; RV32-ZVE64-LABEL: scalar_stack_align16:
; RV32-ZVE64:       # %bb.0:
; RV32-ZVE64-NEXT:    addi sp, sp, -48
; RV32-ZVE64-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-ZVE64-NEXT:    csrr a0, vlenb
; RV32-ZVE64-NEXT:    slli a0, a0, 1
; RV32-ZVE64-NEXT:    sub sp, sp, a0
; RV32-ZVE64-NEXT:    addi a0, sp, 32
; RV32-ZVE64-NEXT:    call extern
; RV32-ZVE64-NEXT:    addi a0, sp, 16
; RV32-ZVE64-NEXT:    csrr a1, vlenb
; RV32-ZVE64-NEXT:    slli a1, a1, 1
; RV32-ZVE64-NEXT:    add sp, sp, a1
; RV32-ZVE64-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-ZVE64-NEXT:    addi sp, sp, 48
; RV32-ZVE64-NEXT:    ret
;
; RV64-ZVE64-LABEL: scalar_stack_align16:
; RV64-ZVE64:       # %bb.0:
; RV64-ZVE64-NEXT:    addi sp, sp, -48
; RV64-ZVE64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-ZVE64-NEXT:    csrr a0, vlenb
; RV64-ZVE64-NEXT:    slli a0, a0, 1
; RV64-ZVE64-NEXT:    sub sp, sp, a0
; RV64-ZVE64-NEXT:    addi a0, sp, 32
; RV64-ZVE64-NEXT:    call extern
; RV64-ZVE64-NEXT:    addi a0, sp, 16
; RV64-ZVE64-NEXT:    csrr a1, vlenb
; RV64-ZVE64-NEXT:    slli a1, a1, 1
; RV64-ZVE64-NEXT:    add sp, sp, a1
; RV64-ZVE64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-ZVE64-NEXT:    addi sp, sp, 48
; RV64-ZVE64-NEXT:    ret
;
; RV32-V-LABEL: scalar_stack_align16:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -48
; RV32-V-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-V-NEXT:    csrr a0, vlenb
; RV32-V-NEXT:    sub sp, sp, a0
; RV32-V-NEXT:    addi a0, sp, 32
; RV32-V-NEXT:    call extern
; RV32-V-NEXT:    addi a0, sp, 16
; RV32-V-NEXT:    csrr a1, vlenb
; RV32-V-NEXT:    add sp, sp, a1
; RV32-V-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-V-NEXT:    addi sp, sp, 48
; RV32-V-NEXT:    ret
;
; RV64-V-LABEL: scalar_stack_align16:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    addi sp, sp, -48
; RV64-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-V-NEXT:    csrr a0, vlenb
; RV64-V-NEXT:    sub sp, sp, a0
; RV64-V-NEXT:    addi a0, sp, 32
; RV64-V-NEXT:    call extern
; RV64-V-NEXT:    addi a0, sp, 16
; RV64-V-NEXT:    csrr a1, vlenb
; RV64-V-NEXT:    add sp, sp, a1
; RV64-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-V-NEXT:    addi sp, sp, 48
; RV64-V-NEXT:    ret
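  ; %a lives in the RVV stack area and is passed to @extern (sp + 32 in the
  ; checks above); %c is the 16-byte-aligned scalar object whose address is
  ; returned (sp + 16, which stays aligned as long as the RVV area size is a
  ; multiple of 16).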
  %a = alloca <vscale x 2 x i32>
  %c = alloca i64, align 16
  call void @extern(ptr %a)
  ret ptr %c
}

declare void @extern(ptr)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}