File: sme-shared-za-interface.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme  -verify-machineinstrs < %s | FileCheck %s

declare void @private_za_callee()

; Ensure that we don't use tail call optimization when a lazy-save is required.
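; A brief sketch of the sequence the CHECK lines verify, following a reading of
; the AArch64 SME lazy-save scheme (the details below are an interpretation of
; the generated code, not something asserted by the test itself):
;   * rdsvl #1 yields the streaming vector length (SVL) in bytes, and the caller
;     carves an SVL x SVL byte save buffer out of the stack.
;   * A 16-byte TPIDR2 block at [x29, #-16] is filled with the buffer address and
;     the number of save slices, and TPIDR2_EL0 is pointed at it before the call.
;   * After the call, ZA is re-enabled (smstart za); if TPIDR2_EL0 was cleared by
;     an intervening __arm_tpidr2_save, __arm_tpidr2_restore reloads ZA from the
;     buffer, and TPIDR2_EL0 is then reset to null.
; Because this restore code must run after the callee returns, the call below
; cannot be emitted as a tail call.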
define void @disable_tailcallopt() "aarch64_inout_za" nounwind {
; CHECK-LABEL: disable_tailcallopt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT:    mov x29, sp
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    rdsvl x8, #1
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    msub x9, x8, x8, x9
; CHECK-NEXT:    mov sp, x9
; CHECK-NEXT:    stur x9, [x29, #-16]
; CHECK-NEXT:    sub x9, x29, #16
; CHECK-NEXT:    sturh wzr, [x29, #-6]
; CHECK-NEXT:    stur wzr, [x29, #-4]
; CHECK-NEXT:    sturh w8, [x29, #-8]
; CHECK-NEXT:    msr TPIDR2_EL0, x9
; CHECK-NEXT:    bl private_za_callee
; CHECK-NEXT:    smstart za
; CHECK-NEXT:    mrs x8, TPIDR2_EL0
; CHECK-NEXT:    sub x0, x29, #16
; CHECK-NEXT:    cbnz x8, .LBB0_2
; CHECK-NEXT:  // %bb.1:
; CHECK-NEXT:    bl __arm_tpidr2_restore
; CHECK-NEXT:  .LBB0_2:
; CHECK-NEXT:    msr TPIDR2_EL0, xzr
; CHECK-NEXT:    mov sp, x29
; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT:    ret
  tail call void @private_za_callee()
  ret void
}

; Ensure that we set up and restore the lazy save correctly for instructions that are lowered to libcalls.
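; The fp128 fadd below is lowered to a call to the __addtf3 compiler-rt/libgcc
; helper, which, as far as the ZA interface is concerned, is just another
; private-ZA callee, so the same TPIDR2 lazy-save setup and conditional
; __arm_tpidr2_restore sequence as above is expected around the call.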
define fp128 @f128_call_za(fp128 %a, fp128 %b) "aarch64_inout_za" nounwind {
; CHECK-LABEL: f128_call_za:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT:    mov x29, sp
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    rdsvl x8, #1
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    msub x9, x8, x8, x9
; CHECK-NEXT:    mov sp, x9
; CHECK-NEXT:    stur x9, [x29, #-16]
; CHECK-NEXT:    sub x9, x29, #16
; CHECK-NEXT:    sturh wzr, [x29, #-6]
; CHECK-NEXT:    stur wzr, [x29, #-4]
; CHECK-NEXT:    sturh w8, [x29, #-8]
; CHECK-NEXT:    msr TPIDR2_EL0, x9
; CHECK-NEXT:    bl __addtf3
; CHECK-NEXT:    smstart za
; CHECK-NEXT:    mrs x8, TPIDR2_EL0
; CHECK-NEXT:    sub x0, x29, #16
; CHECK-NEXT:    cbnz x8, .LBB1_2
; CHECK-NEXT:  // %bb.1:
; CHECK-NEXT:    bl __arm_tpidr2_restore
; CHECK-NEXT:  .LBB1_2:
; CHECK-NEXT:    msr TPIDR2_EL0, xzr
; CHECK-NEXT:    mov sp, x29
; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT:    ret
  %res = fadd fp128 %a, %b
  ret fp128 %res
}
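
; For reference, C source along these lines would exercise the same interface.
; This is a hypothetical illustration using the ACLE __arm_inout("za") keyword;
; it is not part of this test, and the exact attribute spelling is an assumption:
;
;   // Shared-ZA function whose fp128 addition lowers to the private-ZA
;   // libcall __addtf3, forcing a lazy save of ZA around the call.
;   long double f128_call_za(long double a, long double b) __arm_inout("za") {
;     return a + b;
;   }
;
;   // Shared-ZA function calling an ordinary (private-ZA) callee; the call
;   // likewise cannot be a tail call because ZA must be restored afterwards.
;   void private_za_callee(void);
;   void disable_tailcallopt(void) __arm_inout("za") {
;     private_za_callee();
;   }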