; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=CHECK,X86,X86-X87
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
; Ideally this would compile to 5 multiplies.
; At default optimization, powi(a, 15) is expanded inline into a chain of
; scalar multiplies instead of a call to __powidf2: the checks below show
; 6 fmul/mulsd operations and no call on every target. As noted above,
; an optimal addition chain for the exponent 15 would need only 5.
define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
; X86-X87-LABEL: pow_wrapper:
; X86-X87: # %bb.0:
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fmul %st(1), %st
; X86-X87-NEXT: fmul %st, %st(1)
; X86-X87-NEXT: fmul %st, %st(0)
; X86-X87-NEXT: fmul %st, %st(1)
; X86-X87-NEXT: fmul %st, %st(0)
; X86-X87-NEXT: fmulp %st, %st(1)
; X86-X87-NEXT: retl
;
; X86-SSE-LABEL: pow_wrapper:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebp
; X86-SSE-NEXT: movl %esp, %ebp
; X86-SSE-NEXT: andl $-8, %esp
; X86-SSE-NEXT: subl $8, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: movapd %xmm0, %xmm1
; X86-SSE-NEXT: mulsd %xmm0, %xmm1
; X86-SSE-NEXT: mulsd %xmm1, %xmm0
; X86-SSE-NEXT: mulsd %xmm1, %xmm1
; X86-SSE-NEXT: mulsd %xmm1, %xmm0
; X86-SSE-NEXT: mulsd %xmm1, %xmm1
; X86-SSE-NEXT: mulsd %xmm0, %xmm1
; X86-SSE-NEXT: movsd %xmm1, (%esp)
; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: movl %ebp, %esp
; X86-SSE-NEXT: popl %ebp
; X86-SSE-NEXT: retl
;
; X64-LABEL: pow_wrapper:
; X64: # %bb.0:
; X64-NEXT: movapd %xmm0, %xmm1
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: mulsd %xmm1, %xmm0
; X64-NEXT: mulsd %xmm1, %xmm1
; X64-NEXT: mulsd %xmm1, %xmm0
; X64-NEXT: mulsd %xmm1, %xmm1
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
  %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
  ret double %ret
}
; With 'optsize', the inline multiply expansion is suppressed: all targets
; call the __powidf2 libcall instead. On x86-64 this becomes a tail jump,
; so no frame is set up at all.
define double @pow_wrapper_optsize(double %a) optsize {
; X86-X87-LABEL: pow_wrapper_optsize:
; X86-X87: # %bb.0:
; X86-X87-NEXT: subl $12, %esp
; X86-X87-NEXT: .cfi_def_cfa_offset 16
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fstpl (%esp)
; X86-X87-NEXT: movl $15, {{[0-9]+}}(%esp)
; X86-X87-NEXT: calll __powidf2
; X86-X87-NEXT: addl $12, %esp
; X86-X87-NEXT: .cfi_def_cfa_offset 4
; X86-X87-NEXT: retl
;
; X86-SSE-LABEL: pow_wrapper_optsize:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: movl $15, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: calll __powidf2
; X86-SSE-NEXT: addl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 4
; X86-SSE-NEXT: retl
;
; X64-LABEL: pow_wrapper_optsize:
; X64: # %bb.0:
; X64-NEXT: movl $15, %edi
; X64-NEXT: jmp __powidf2 # TAILCALL
  %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
  ret double %ret
}
; With 'minsize', codegen also calls __powidf2, but on x86-64 the exponent
; argument is materialized with pushq $15 / popq %rdi rather than movl —
; presumably a shorter instruction encoding than the immediate move
; (NOTE(review): encoding-size rationale inferred from the push/pop pattern).
define double @pow_wrapper_minsize(double %a) minsize {
; X86-X87-LABEL: pow_wrapper_minsize:
; X86-X87: # %bb.0:
; X86-X87-NEXT: subl $12, %esp
; X86-X87-NEXT: .cfi_def_cfa_offset 16
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fstpl (%esp)
; X86-X87-NEXT: movl $15, {{[0-9]+}}(%esp)
; X86-X87-NEXT: calll __powidf2
; X86-X87-NEXT: addl $12, %esp
; X86-X87-NEXT: .cfi_def_cfa_offset 4
; X86-X87-NEXT: retl
;
; X86-SSE-LABEL: pow_wrapper_minsize:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: movl $15, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: calll __powidf2
; X86-SSE-NEXT: addl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 4
; X86-SSE-NEXT: retl
;
; X64-LABEL: pow_wrapper_minsize:
; X64: # %bb.0:
; X64-NEXT: pushq $15
; X64-NEXT: .cfi_adjust_cfa_offset 8
; X64-NEXT: popq %rdi
; X64-NEXT: .cfi_adjust_cfa_offset -8
; X64-NEXT: jmp __powidf2 # TAILCALL
  %ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
  ret double %ret
}
; powi intrinsic: raises a double to a 32-bit integer power. Lowered either
; inline (first test) or via the __powidf2 libcall (optsize/minsize tests).
declare double @llvm.powi.f64(double, i32) nounwind readonly