; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -emulated-tls -relocation-model=pic < %s \
; RUN: | FileCheck -check-prefix=RV32 %s
; RUN: llc -mtriple=riscv64 -emulated-tls -relocation-model=pic < %s \
; RUN: | FileCheck -check-prefix=RV64 %s
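; This test exercises -emulated-tls lowering: each thread-local access becomes a
; call to __emutls_get_address, passing the address of the per-variable control
; variable __emutls_v.<name>. Under PIC, the preemptible globals (external_x, y)
; load that address through the GOT via %got_pcrel_hi, while the internal global
; is addressed directly with %pcrel_hi/%pcrel_lo.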
@external_x = external thread_local global i32, align 8
@y = thread_local global i8 7, align 2
@internal_z = internal thread_local global i64 9, align 16

define ptr @get_external_x() nounwind {
; RV32-LABEL: get_external_x:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .Lpcrel_hi0:
; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi0)(a0)
; RV32-NEXT: call __emutls_get_address@plt
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: get_external_x:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .Lpcrel_hi0:
; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
; RV64-NEXT: call __emutls_get_address@plt
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
ret ptr @external_x
}

define ptr @get_y() nounwind {
; RV32-LABEL: get_y:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .Lpcrel_hi1:
; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi1)(a0)
; RV32-NEXT: call __emutls_get_address@plt
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: get_y:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .Lpcrel_hi1:
; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi1)(a0)
; RV64-NEXT: call __emutls_get_address@plt
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
ret ptr @y
}

define ptr @get_internal_z() nounwind {
; RV32-LABEL: get_internal_z:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .Lpcrel_hi2:
; RV32-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
; RV32-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
; RV32-NEXT: call __emutls_get_address@plt
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: get_internal_z:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .Lpcrel_hi2:
; RV64-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
; RV64-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
; RV64-NEXT: call __emutls_get_address@plt
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
entry:
ret ptr @internal_z
}

; UTC_ARGS: --disable
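; The checks below are maintained by hand (UTC_ARGS: --disable). They verify the
; emitted emulated-TLS data: each __emutls_v.<name> control variable holds the
; variable's size, alignment, a zero slot, and a pointer to the __emutls_t.<name>
; template carrying the initializer (word-sized fields on RV32, quad-sized on RV64).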
; RV32: .data
; RV32: .globl __emutls_v.y
; RV32: .p2align 2
; RV32-LABEL: __emutls_v.y:
; RV32-NEXT: .word 1
; RV32-NEXT: .word 2
; RV32-NEXT: .word 0
; RV32-NEXT: .word __emutls_t.y
; RV32: .section .rodata,
; RV32-LABEL: __emutls_t.y:
; RV32-NEXT: .byte 7
; RV32: .data
; RV32: .p2align 2
; RV32-LABEL: __emutls_v.internal_z:
; RV32-NEXT: .word 8
; RV32-NEXT: .word 16
; RV32-NEXT: .word 0
; RV32-NEXT: .word __emutls_t.internal_z
; RV32: .section .rodata,
; RV32-LABEL: __emutls_t.internal_z:
; RV32-NEXT: .quad 9
; RV64: .data
; RV64: .globl __emutls_v.y
; RV64: .p2align 3
; RV64-LABEL: __emutls_v.y:
; RV64-NEXT: .quad 1
; RV64-NEXT: .quad 2
; RV64-NEXT: .quad 0
; RV64-NEXT: .quad __emutls_t.y
; RV64: .section .rodata,
; RV64-LABEL: __emutls_t.y:
; RV64-NEXT: .byte 7
; RV64: .data
; RV64: .p2align 3
; RV64-LABEL: __emutls_v.internal_z:
; RV64-NEXT: .quad 8
; RV64-NEXT: .quad 16
; RV64-NEXT: .quad 0
; RV64-NEXT: .quad __emutls_t.internal_z
; RV64: .section .rodata,
; RV64-LABEL: __emutls_t.internal_z:
; RV64-NEXT: .quad 9