/* i386 assembly implementations of the inner-loop fraction arithmetic
   (take_fraction, take_scaled, make_fraction, make_scaled) for Metafont
   and MetaPost.  Public domain.  Included in texmfmp.c.
   By Wayne Sullivan <wgs@maths.ucd.ie>. */
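/* For reference, here is a portable C sketch of what two of the four
   routines compute, assuming a 64-bit "long long" and a 32-bit "int".
   The FRACTION_ASM_REFERENCE guard, the reference_* names, and the
   "extern char" declaration of aritherror are illustrative only and are
   not part of the build; the assembly below also rejects operands of
   -2^31 up front, which this sketch does not reproduce exactly. */
#ifdef FRACTION_ASM_REFERENCE
extern char aritherror;            /* assumed declaration */

/* take_fraction: round (p*q / 2^28), clamped to +/-(2^31 - 1) with
   aritherror set on overflow. */
static int
reference_takefraction (int p, int q)
{
  long long t = (long long) p * q;
  int neg = t < 0;
  if (neg)
    t = -t;
  t = (t + 0x08000000) >> 28;      /* round half away from zero */
  if (t > 0x7fffffff) {
    aritherror = 1;
    t = 0x7fffffff;
  }
  return neg ? (int) -t : (int) t;
}

/* make_fraction: round (p * 2^28 / q), clamped with aritherror set on
   overflow or q == 0. */
static int
reference_makefraction (int p, int q)
{
  long long pp = p, qq = q, quot, rem;
  int neg = (pp < 0) != (qq < 0);
  if (pp < 0) pp = -pp;
  if (qq < 0) qq = -qq;
  if (qq == 0) {
    aritherror = 1;
    return neg ? -0x7fffffff : 0x7fffffff;
  }
  quot = (pp << 28) / qq;
  rem = (pp << 28) % qq;
  if (2 * rem + 1 > qq)            /* round half away from zero */
    quot++;
  if (quot > 0x7fffffff) {
    aritherror = 1;
    quot = 0x7fffffff;
  }
  return neg ? (int) -quot : (int) quot;
}
#endif /* FRACTION_ASM_REFERENCE */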
asm(".text\n"
" .align 4\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _ztakefraction\n"
"_ztakefraction:\n"
#else
".globl ztakefraction\n"
"ztakefraction:\n"
#endif
" pushl %ebp\n"
" movl %esp,%ebp\n"
" xorl %ecx,%ecx\n"
" movl 8(%ebp),%eax\n"
" cmpl $0x80000000,%eax\n"
" jz LL5\n"
" imull 12(%ebp)\n"
" orl %edx,%edx\n"
" jns LL2\n"
" negl %edx\n"
" negl %eax\n"
" sbbl %ecx,%edx\n"
" incl %ecx\n"
"LL2:\n"
" addl $0x08000000,%eax\n"
" adcl $0,%edx\n"
" cmpl $0x07ffffff,%edx\n"
" ja LL3\n"
" shrd $28, %edx,%eax\n"
"LL1: jecxz LL4\n"
" negl %eax\n"
"LL4:\n"
" movl %ebp,%esp\n"
" popl %ebp\n"
" ret\n"
"LL5: incl %ecx\n"
"LL3: movl $0x7fffffff,%eax\n"
#ifdef ASM_NEEDS_UNDERSCORE
" movb $1,_aritherror\n"
#else
" movb $1,aritherror\n"
#endif
" jmp LL1\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _ztakescaled\n"
"_ztakescaled:\n"
#else
".globl ztakescaled\n"
"ztakescaled:\n"
#endif
" pushl %ebp\n"
" movl %esp,%ebp\n"
" movl 8(%ebp),%eax\n"
" xorl %ecx,%ecx\n"
" cmpl $0x80000000,%eax\n"
" jz LL5\n"
" imull 12(%ebp)\n"
" orl %edx,%edx\n"
" jns LL12\n"
" negl %edx\n"
" negl %eax\n"
" sbbl %ecx,%edx\n"
" incl %ecx\n"
"LL12:\n"
" addl $0x00008000,%eax\n"
" adcl $0,%edx\n"
" cmpl $0x00007fff,%edx\n"
" ja LL3\n"
" shrd $16, %edx,%eax\n"
" jecxz LL4\n"
" negl %eax\n"
" jmp LL4\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
".globl _zmakescaled\n"
".globl _zmakefraction\n"
"_zmakescaled:\n"
#else
".globl zmakescaled\n"
".globl zmakefraction\n"
"zmakescaled:\n"
#endif
" movb $16,%cl\n"
" jmp LL30\n"
" .align 4, 0x90\n"
#ifdef ASM_NEEDS_UNDERSCORE
"_zmakefraction:\n"
#else
"zmakefraction:\n"
#endif
" movb $4,%cl\n"
"LL30:\n"
" movb $0,%ch\n"
" pushl %ebp\n"
" movl %esp,%ebp\n"
" pushl %ebx\n"
" movl 8(%ebp),%edx\n"
" xorl %eax,%eax\n"
" orl %edx,%edx\n"
" jns LL32\n"
" inc %ch\n"
" negl %edx\n"
"LL32:\n"
" movl 12(%ebp),%ebx\n"
" orl %ebx,%ebx\n"
" jns LL33\n"
" dec %ch\n"
" negl %ebx\n"
" orl %ebx,%ebx\n"
" js LL34\n"
"LL33:\n"
" orl %edx,%edx\n"
" js LL34\n"
" shrd %cl,%edx,%eax\n"
" shrl %cl,%edx\n"
" cmpl %ebx,%edx\n"
" jae LL34\n"
" divl %ebx\n"
" addl %edx,%edx\n"
" incl %edx\n"
" subl %edx,%ebx\n"
" adcl $0,%eax\n"
" jc LL34\n"
" cmpl $0x7fffffff,%eax\n"
" ja LL34\n"
"LL31: or %ch,%ch\n"
" jz LL35\n"
" negl %eax\n"
"LL35:\n"
" popl %ebx\n"
" movl %ebp,%esp\n"
" popl %ebp\n"
" ret\n"
"LL34: movl $0x7fffffff,%eax\n"
#ifdef ASM_NEEDS_UNDERSCORE
" movb $1,_aritherror\n"
#else
" movb $1,aritherror\n"
#endif
" jmp LL31\n");