; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
; Acquire load of an i8: both targets emit a plain byte load followed by a
; full memory barrier (dbar 0) to enforce the acquire ordering.
define i8 @load_acquire_i8(ptr %ptr) {
; LA32-LABEL: load_acquire_i8:
; LA32: # %bb.0:
; LA32-NEXT: ld.b $a0, $a0, 0
; LA32-NEXT: dbar 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: load_acquire_i8:
; LA64: # %bb.0:
; LA64-NEXT: ld.b $a0, $a0, 0
; LA64-NEXT: dbar 0
; LA64-NEXT: jirl $zero, $ra, 0
  %val = load atomic i8, ptr %ptr acquire, align 1
  ret i8 %val
}
; Acquire load of an i16: halfword load plus a trailing dbar 0 barrier on
; both LA32 and LA64.
define i16 @load_acquire_i16(ptr %ptr) {
; LA32-LABEL: load_acquire_i16:
; LA32: # %bb.0:
; LA32-NEXT: ld.h $a0, $a0, 0
; LA32-NEXT: dbar 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: load_acquire_i16:
; LA64: # %bb.0:
; LA64-NEXT: ld.h $a0, $a0, 0
; LA64-NEXT: dbar 0
; LA64-NEXT: jirl $zero, $ra, 0
  %val = load atomic i16, ptr %ptr acquire, align 2
  ret i16 %val
}
; Acquire load of an i32: word load plus a trailing dbar 0 barrier on both
; LA32 and LA64.
define i32 @load_acquire_i32(ptr %ptr) {
; LA32-LABEL: load_acquire_i32:
; LA32: # %bb.0:
; LA32-NEXT: ld.w $a0, $a0, 0
; LA32-NEXT: dbar 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: load_acquire_i32:
; LA64: # %bb.0:
; LA64-NEXT: ld.w $a0, $a0, 0
; LA64-NEXT: dbar 0
; LA64-NEXT: jirl $zero, $ra, 0
  %val = load atomic i32, ptr %ptr acquire, align 4
  ret i32 %val
}
; Acquire load of an i64. LA32 has no native 64-bit atomic load, so it
; lowers to a call to the __atomic_load_8 runtime helper; the immediate 2
; materialized into $a1 is the memory-order argument (2 corresponds to
; acquire in the __atomic_* libcall convention). LA64 loads the doubleword
; natively and follows it with a dbar 0 barrier.
define i64 @load_acquire_i64(ptr %ptr) {
; LA32-LABEL: load_acquire_i64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: ori $a1, $zero, 2
; LA32-NEXT: bl __atomic_load_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: load_acquire_i64:
; LA64: # %bb.0:
; LA64-NEXT: ld.d $a0, $a0, 0
; LA64-NEXT: dbar 0
; LA64-NEXT: jirl $zero, $ra, 0
  %val = load atomic i64, ptr %ptr acquire, align 8
  ret i64 %val
}
; Release store of an i8: a leading dbar 0 barrier enforces the release
; ordering before the byte store, on both LA32 and LA64.
define void @store_release_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_release_i8:
; LA32: # %bb.0:
; LA32-NEXT: dbar 0
; LA32-NEXT: st.b $a0, $a1, 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: store_release_i8:
; LA64: # %bb.0:
; LA64-NEXT: dbar 0
; LA64-NEXT: st.b $a0, $a1, 0
; LA64-NEXT: jirl $zero, $ra, 0
  store atomic i8 %v, ptr %ptr release, align 1
  ret void
}
; Release store of an i16: dbar 0 barrier followed by a halfword store on
; both LA32 and LA64.
define void @store_release_i16(ptr %ptr, i16 signext %v) {
; LA32-LABEL: store_release_i16:
; LA32: # %bb.0:
; LA32-NEXT: dbar 0
; LA32-NEXT: st.h $a0, $a1, 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: store_release_i16:
; LA64: # %bb.0:
; LA64-NEXT: dbar 0
; LA64-NEXT: st.h $a0, $a1, 0
; LA64-NEXT: jirl $zero, $ra, 0
  store atomic i16 %v, ptr %ptr release, align 2
  ret void
}
; Release store of an i32: dbar 0 barrier followed by a word store on both
; LA32 and LA64.
define void @store_release_i32(ptr %ptr, i32 signext %v) {
; LA32-LABEL: store_release_i32:
; LA32: # %bb.0:
; LA32-NEXT: dbar 0
; LA32-NEXT: st.w $a0, $a1, 0
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: store_release_i32:
; LA64: # %bb.0:
; LA64-NEXT: dbar 0
; LA64-NEXT: st.w $a0, $a1, 0
; LA64-NEXT: jirl $zero, $ra, 0
  store atomic i32 %v, ptr %ptr release, align 4
  ret void
}
; Release store of an i64. LA32 has no native 64-bit atomic store, so it
; lowers to a call to the __atomic_store_8 runtime helper; the immediate 3
; materialized into $a3 is the memory-order argument (3 corresponds to
; release in the __atomic_* libcall convention). LA64 stores the doubleword
; natively, preceded by a dbar 0 barrier.
define void @store_release_i64(ptr %ptr, i64 %v) {
; LA32-LABEL: store_release_i64:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: .cfi_def_cfa_offset 16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: ori $a3, $zero, 3
; LA32-NEXT: bl __atomic_store_8
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: jirl $zero, $ra, 0
;
; LA64-LABEL: store_release_i64:
; LA64: # %bb.0:
; LA64-NEXT: dbar 0
; LA64-NEXT: st.d $a0, $a1, 0
; LA64-NEXT: jirl $zero, $ra, 0
  store atomic i64 %v, ptr %ptr release, align 8
  ret void
}