; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple powerpc64le-unknown-linux | FileCheck %s
; RUN: llc < %s -mtriple powerpc64le-unknown-linux -debug-only=machine-scheduler \
; RUN: 2>&1 | FileCheck %s --check-prefix=LOG
; REQUIRES: asserts
; FPSCR read (llvm.ppc.readflm -> MFFS) and write (llvm.ppc.setflm -> MTFSF)
; must not be reordered with the surrounding FP arithmetic. The scheduler
; debug output below shows MTFSF / MFFS as the ExitSU of their scheduling
; regions, i.e. each one terminates a region rather than floating inside it.
; (Banner normalized to match @in_strict; the full debug banner uses ten
; asterisks, so the substring still matches.)
define double @in_nostrict(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: in_nostrict:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mffs 0
; CHECK-NEXT: xsdivdp 1, 1, 2
; CHECK-NEXT: xsadddp 1, 1, 3
; CHECK-NEXT: xsadddp 0, 1, 0
; CHECK-NEXT: mtfsf 255, 4
; CHECK-NEXT: xsdivdp 1, 3, 4
; CHECK-NEXT: xsadddp 1, 1, 2
; CHECK-NEXT: xsadddp 1, 0, 1
; CHECK-NEXT: blr
;
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_nostrict:%bb.0 entry
; LOG: ExitSU: MTFSF 255, %{{[0-9]+}}:f8rc, 0, 0
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_nostrict:%bb.0 entry
; LOG: ExitSU: %{{[0-9]+}}:f8rc = MFFS implicit $rm
;
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_nostrict:%bb.0 entry
; LOG: ExitSU: MTFSF 255, renamable $f{{[0-9]+}}, 0, 0
entry:
%0 = tail call double @llvm.ppc.readflm()
%1 = fdiv double %a, %b
%2 = fadd double %1, %c
%3 = fadd double %2, %0
call double @llvm.ppc.setflm(double %d)
%5 = fdiv double %c, %d
%6 = fadd double %5, %b
%7 = fadd double %3, %6
ret double %7
}
; Same shape as @in_nostrict, but using the constrained-FP intrinsics under
; the strictfp attribute (#0). The scheduler must still end each scheduling
; region at the FPSCR write (MTFSF) / read (MFFS), as the ExitSU lines show,
; and the emitted instruction sequence is identical to the non-strict case.
define double @in_strict(double %a, double %b, double %c, double %d) #0 {
; CHECK-LABEL: in_strict:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mffs 0
; CHECK-NEXT: xsdivdp 1, 1, 2
; CHECK-NEXT: xsadddp 1, 1, 3
; CHECK-NEXT: xsadddp 0, 1, 0
; CHECK-NEXT: mtfsf 255, 4
; CHECK-NEXT: xsdivdp 1, 3, 4
; CHECK-NEXT: xsadddp 1, 1, 2
; CHECK-NEXT: xsadddp 1, 0, 1
; CHECK-NEXT: blr
;
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_strict:%bb.0 entry
; LOG: ExitSU: MTFSF 255, %{{[0-9]+}}:f8rc, 0, 0
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_strict:%bb.0 entry
; LOG: ExitSU: %{{[0-9]+}}:f8rc = MFFS implicit $rm
;
; LOG: ***** MI Scheduling *****
; LOG-NEXT: in_strict:%bb.0 entry
; LOG: ExitSU: MTFSF 255, renamable $f{{[0-9]+}}, 0, 0
entry:
%0 = tail call double @llvm.ppc.readflm()
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
%2 = call double @llvm.experimental.constrained.fadd.f64(double %1, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
%3 = call double @llvm.experimental.constrained.fadd.f64(double %2, double %0, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
call double @llvm.ppc.setflm(double %d)
%5 = call double @llvm.experimental.constrained.fdiv.f64(double %c, double %d, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
%6 = call double @llvm.experimental.constrained.fadd.f64(double %5, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
%7 = call double @llvm.experimental.constrained.fadd.f64(double %3, double %6, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %7
}
; Two llvm.ppc.readflm calls separated by a call to @effect_func must NOT be
; CSE'd into one: the output contains two separate mffs instructions, one
; before and one after the call, since the callee may have changed the FPSCR.
define void @cse_nomerge(double* %f1, double* %f2, double %f3) #0 {
; CHECK-LABEL: cse_nomerge:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: .cfi_offset r30, -24
; CHECK-NEXT: .cfi_offset f31, -8
; CHECK-NEXT: std 30, -24(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 31, -8(1) # 8-byte Folded Spill
; CHECK-NEXT: std 0, 16(1)
; CHECK-NEXT: stdu 1, -64(1)
; CHECK-NEXT: fmr 31, 1
; CHECK-NEXT: mr 30, 4
; CHECK-NEXT: mffs 0
; CHECK-NEXT: stfd 0, 0(3)
; CHECK-NEXT: bl effect_func
; CHECK-NEXT: nop
; CHECK-NEXT: mffs 0
; CHECK-NEXT: stfd 0, 0(30)
; CHECK-NEXT: mtfsf 255, 31
; CHECK-NEXT: addi 1, 1, 64
; CHECK-NEXT: ld 0, 16(1)
; CHECK-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 30, -24(1) # 8-byte Folded Reload
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
%0 = call double @llvm.ppc.readflm()
store double %0, double* %f1, align 8
call void @effect_func()
%1 = call double @llvm.ppc.readflm()
store double %1, double* %f2, align 8
%2 = call contract double @llvm.ppc.setflm(double %f3)
ret void
}
; Same as @cse_nomerge, but the intervening callee is marked readonly (#1).
; The two readflm calls are still not merged — two mffs instructions remain —
; since readonly does not cover the FP status/control register state.
define void @cse_nomerge_readonly(double* %f1, double* %f2, double %f3) #0 {
; CHECK-LABEL: cse_nomerge_readonly:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr 0
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: .cfi_offset r30, -24
; CHECK-NEXT: .cfi_offset f31, -8
; CHECK-NEXT: std 30, -24(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 31, -8(1) # 8-byte Folded Spill
; CHECK-NEXT: std 0, 16(1)
; CHECK-NEXT: stdu 1, -64(1)
; CHECK-NEXT: fmr 31, 1
; CHECK-NEXT: mr 30, 4
; CHECK-NEXT: mffs 0
; CHECK-NEXT: stfd 0, 0(3)
; CHECK-NEXT: bl readonly_func
; CHECK-NEXT: nop
; CHECK-NEXT: mffs 0
; CHECK-NEXT: stfd 0, 0(30)
; CHECK-NEXT: mtfsf 255, 31
; CHECK-NEXT: addi 1, 1, 64
; CHECK-NEXT: ld 0, 16(1)
; CHECK-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 30, -24(1) # 8-byte Folded Reload
; CHECK-NEXT: mtlr 0
; CHECK-NEXT: blr
entry:
%0 = call double @llvm.ppc.readflm()
store double %0, double* %f1, align 8
call void @readonly_func()
%1 = call double @llvm.ppc.readflm()
store double %1, double* %f2, align 8
%2 = call contract double @llvm.ppc.setflm(double %f3)
ret void
}
; External callee with arbitrary (unknown) side effects.
declare void @effect_func()
; External callee carrying the readonly attribute (#1).
declare void @readonly_func() #1
; Read / write the FP status & control register; lowered above to mffs/mtfsf.
declare double @llvm.ppc.readflm()
declare double @llvm.ppc.setflm(double)
; Constrained FP intrinsics used by the strictfp tests.
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
attributes #0 = { strictfp }
attributes #1 = { readonly }