; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
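; The three constrained divisions use round.dynamic under strictfp, so each one
; observes the rounding mode set by the preceding call to fesetround and must
; not be CSE'd: the CHECK lines require three separate divsd instructions.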
define double @foo(double %0) #0 {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
; CHECK-NEXT:    callq fesetround@PLT
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; CHECK-NEXT:    divsd (%rsp), %xmm1 # 8-byte Folded Reload
; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
; CHECK-NEXT:    callq fesetround@PLT
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; CHECK-NEXT:    divsd (%rsp), %xmm0 # 8-byte Folded Reload
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
; CHECK-NEXT:    callq fesetround@PLT
; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; CHECK-NEXT:    divsd (%rsp), %xmm2 # 8-byte Folded Reload
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    callq fma@PLT
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    retq
  %2 = call i32 @fesetround(i32 noundef 1024)
  %3 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %4 = call i32 @fesetround(i32 noundef 1024)
  %5 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %6 = call i32 @fesetround(i32 noundef 1024)
  %7 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %8 = call double @llvm.experimental.constrained.fma.f64(double %3, double %5, double %7, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  ret double %8
}
declare i32 @fesetround(i32) #0
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) #0
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #0
attributes #0 = { nounwind strictfp }