/* This is a stripped-down version of floatlib.c.  It supplies only those
   functions which exist in libgcc, but for which there are no assembly
   language versions in m68k/lb1sf68.asm.
   It also includes simplistic support for extended floats (by working in
   double precision).  You must compile this file again with -DEXTFLOAT
   to get this support.  */
/*
** gnulib support for software floating point.
** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
** Permission is granted to do *anything* you want with this file,
** commercial or otherwise, provided this message remains intact. So there!
** I would appreciate receiving any updates/patches/changes that anyone
** makes, and am willing to be the repository for said changes (am I
** making a big mistake?).
**
** Pat Wood
** Pipeline Associates, Inc.
** pipeline!phw@motown.com or
** sun!pipeline!phw or
** uunet!motown!pipeline!phw
**
** 05/01/91 -- V1.0 -- first release to gcc mailing lists
** 05/04/91 -- V1.1 -- added float and double prototypes and return values
** -- fixed problems with adding and subtracting zero
** -- fixed rounding in truncdfsf2
** -- fixed SWAP define and tested on 386
*/
/*
** The following are routines that replace the gnulib soft floating point
** routines that are called automatically when -msoft-float is selected.
** They support single and double precision IEEE format, with provisions
** for byte-swapped machines (tested on 386). Some of the double-precision
** routines work at full precision, but most of the hard ones simply punt
** and call the single precision routines, producing a loss of accuracy.
** long long support is not assumed or included.
** Overall accuracy is close to IEEE (actually 68882) for single-precision
** arithmetic. I think there may still be a 1 in 1000 chance of a bit
** being rounded the wrong way during a multiply. I'm not fussy enough to
** bother with it, but if anyone is, knock yourself out.
**
** Efficiency has only been addressed where it was obvious that something
** would make a big difference. Anyone who wants to do this right for
** best speed should go in and rewrite in assembler.
**
** I have tested this only on a 68030 workstation and on 386/ix, built
** with -msoft-float.
*/
/* the following deal with IEEE single-precision numbers */
#define EXCESS 126L
#define SIGNBIT 0x80000000L
#define HIDDEN (1L << 23L)
#define SIGN(fp) ((fp) & SIGNBIT)
#define EXP(fp) (((fp) >> 23L) & 0xFF)
#define MANT(fp) (((fp) & 0x7FFFFFL) | HIDDEN)
#define PACK(s,e,m) ((s) | ((e) << 23L) | (m))
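/* EXCESS is the IEEE single-precision bias (127) minus one; EXCESSD and
   EXCESSX below follow the same off-by-one convention, and the conversion
   routines' shift counts are written against it.  Example: 1.0f is stored
   as 0x3F800000, so SIGN gives 0, EXP gives 127 and MANT gives 0x800000
   (just the hidden bit).  */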
/* the following deal with IEEE double-precision numbers */
#define EXCESSD 1022L
#define HIDDEND (1L << 20L)
#define EXPDBITS 11
#define EXPDMASK 0x7FFL
#define EXPD(fp) (((fp.l.upper) >> 20L) & 0x7FFL)
#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
                   (fp.l.lower >> 22))
#define MANTDMASK 0xFFFFFL /* mask of upper part */
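/* MANTD yields the top 31 bits of the significand with the hidden bit at
   bit 30: the 20 fraction bits from l.upper shifted up by 10, plus the
   top 10 bits of l.lower.  Example: 1.0 is stored as 0x3FF00000:00000000,
   so EXPD gives 1023 and MANTD gives 0x40000000.  */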
/* the following deal with IEEE extended-precision numbers */
#define EXCESSX 16382L
#define HIDDENX (1L << 31L)
#define EXPXBITS 15
#define EXPXMASK 0x7FFF
#define EXPX(fp) (((fp.l.upper) >> 16) & EXPXMASK)
#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
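/* These match the m68k (68881) 96-bit extended format: sign and 15-bit
   exponent in the top half of l.upper, 16 unused bits, then a 64-bit
   mantissa in l.middle:l.lower whose integer bit is explicit (HIDDENX,
   bit 31 of l.middle).  MANTXMASK masks the mantissa bits kept in
   l.middle.  */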
union double_long
{
double d;
struct {
long upper;
unsigned long lower;
} l;
};
union float_long {
float f;
long l;
};
union long_double_long
{
long double ld;
struct
{
long upper;
unsigned long middle;
unsigned long lower;
} l;
};
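/* These unions assume 32-bit longs and that the most significant word of
   a double (sign, exponent, top fraction bits) is stored first, as on the
   m68k; this stripped version has no SWAP support for little-endian
   hosts.  */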
#ifndef EXTFLOAT
int
__unordsf2(float a, float b)
{
union float_long fl;
fl.f = a;
if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
return 1;
fl.f = b;
if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
return 1;
return 0;
}
int
__unorddf2(double a, double b)
{
union double_long dl;
dl.d = a;
if (EXPD(dl) == EXPDMASK
&& ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
return 1;
dl.d = b;
if (EXPD(dl) == EXPDMASK
&& ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
return 1;
return 0;
}
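/* __unordsf2 and __unorddf2 return nonzero when either operand is a NaN
   (exponent field all ones, fraction nonzero), which is the behavior the
   compiler expects of the unordered-comparison helpers.  EXP (~0u) is
   simply the all-ones exponent value 0xFF.  */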
/* convert unsigned int to double */
double
__floatunsidf (unsigned long a1)
{
long exp = 32 + EXCESSD;
union double_long dl;
if (!a1)
{
dl.l.upper = dl.l.lower = 0;
return dl.d;
}
while (a1 < 0x2000000L)
{
a1 <<= 4;
exp -= 4;
}
while (a1 < 0x80000000L)
{
a1 <<= 1;
exp--;
}
/* pack up and go home */
dl.l.upper = exp << 20L;
dl.l.upper |= (a1 >> 11L) & ~HIDDEND;
dl.l.lower = a1 << 21L;
return dl.d;
}
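/* Example: __floatunsidf (1) normalizes a1 up to 0x80000000 while exp
   drops from 1054 to 1023; the packed result is upper = 0x3FF00000,
   lower = 0, i.e. 1.0.  */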
/* convert int to double */
double
__floatsidf (long a1)
{
long sign = 0, exp = 31 + EXCESSD;
union double_long dl;
if (!a1)
{
dl.l.upper = dl.l.lower = 0;
return dl.d;
}
if (a1 < 0)
{
sign = SIGNBIT;
a1 = (long)-(unsigned long)a1;
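      /* If a1 was 0x80000000 (-2^31), the negation above wraps back to
         the same value and a1 is still negative, so return the exact
         encoding of -2^31 (sign bit, biased exponent 32 + EXCESSD, zero
         mantissa) directly.  */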
if (a1 < 0)
{
dl.l.upper = SIGNBIT | ((32 + EXCESSD) << 20L);
dl.l.lower = 0;
return dl.d;
}
}
while (a1 < 0x1000000L)
{
a1 <<= 4;
exp -= 4;
}
while (a1 < 0x40000000L)
{
a1 <<= 1;
exp--;
}
/* pack up and go home */
dl.l.upper = sign;
dl.l.upper |= exp << 20L;
dl.l.upper |= (a1 >> 10L) & ~HIDDEND;
dl.l.lower = a1 << 22L;
return dl.d;
}
/* convert unsigned int to float */
float
__floatunsisf (unsigned long l)
{
double foo = __floatunsidf (l);
return foo;
}
/* convert int to float */
float
__floatsisf (long l)
{
double foo = __floatsidf (l);
return foo;
}
/* convert float to double */
double
__extendsfdf2 (float a1)
{
register union float_long fl1;
register union double_long dl;
register long exp;
register long mant;
fl1.f = a1;
dl.l.upper = SIGN (fl1.l);
if ((fl1.l & ~SIGNBIT) == 0)
{
dl.l.lower = 0;
return dl.d;
}
exp = EXP(fl1.l);
mant = MANT (fl1.l) & ~HIDDEN;
if (exp == 0)
{
/* Denormal. */
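      /* Renormalize: shift the fraction up until the implied bit appears,
         letting exp drop to zero or below.  Every single-precision
         denormal fits as a normalized double, so the rebiasing below
         still yields a valid positive double exponent.  */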
exp = 1;
while (!(mant & HIDDEN))
{
mant <<= 1;
exp--;
}
mant &= ~HIDDEN;
}
exp = exp - EXCESS + EXCESSD;
dl.l.upper |= exp << 20;
dl.l.upper |= mant >> 3;
dl.l.lower = mant << 29;
return dl.d;
}
/* convert double to float */
float
__truncdfsf2 (double a1)
{
register long exp;
register long mant;
register union float_long fl;
register union double_long dl1;
dl1.d = a1;
if ((dl1.l.upper & ~SIGNBIT) == 0 && !dl1.l.lower)
{
fl.l = SIGND(dl1);
return fl.f;
}
exp = EXPD (dl1) - EXCESSD + EXCESS;
/* shift double mantissa 6 bits so we can round */
mant = MANTD (dl1) >> 6;
/* Check for underflow and denormals. */
if (exp <= 0)
{
if (exp < -24)
mant = 0;
else
mant >>= 1 - exp;
exp = 0;
}
/* now round and shift down */
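  /* For a normal result mant holds the hidden bit, 23 fraction bits and
     one guard bit; adding 1 and shifting right rounds up when that guard
     bit is set.  Bits already discarded by the earlier shifts are never
     consulted, so ties are always rounded up rather than to even.  */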
mant += 1;
mant >>= 1;
/* did the round overflow? */
if (mant & 0xFF000000L)
{
mant >>= 1;
exp++;
}
mant &= ~HIDDEN;
/* pack up and go home */
fl.l = PACK (SIGND (dl1), exp, mant);
return (fl.f);
}
/* convert double to int */
long
__fixdfsi (double a1)
{
register union double_long dl1;
register long exp;
register long l;
dl1.d = a1;
if (!dl1.l.upper && !dl1.l.lower)
return 0;
exp = EXPD (dl1) - EXCESSD - 31;
l = MANTD (dl1);
if (exp > 0)
{
/* Return largest integer. */
return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
}
if (exp <= -32)
return 0;
/* shift down until exp = 0 */
if (exp < 0)
l >>= -exp;
return (SIGND (dl1) ? -l : l);
}
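/* The conversion truncates toward zero (e.g. -3.75 becomes -3), and any
   value of magnitude 2^31 or more is clamped to 0x7fffffff or 0x80000000
   above rather than trapping.  */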
/* convert float to int */
long
__fixsfsi (float a1)
{
double foo = a1;
return __fixdfsi (foo);
}
#else /* EXTFLOAT */
/* Primitive extended precision floating point support.
We assume all numbers are normalized, don't do any rounding, etc. */
/* Prototypes for the above in case we use them.  */
double __floatunsidf (unsigned long);
double __floatsidf (long);
float __floatsisf (long);
double __extendsfdf2 (float);
float __truncdfsf2 (double);
long __fixdfsi (double);
long __fixsfsi (float);
/* __cmpdf2 is not defined in this file (it is provided elsewhere, e.g. by
   m68k/lb1sf68.asm); declare it here so the comparison helpers below do
   not rely on an implicit declaration.  */
long __cmpdf2 (double, double);
int
__unordxf2(long double a, long double b)
{
union long_double_long ldl;
ldl.ld = a;
if (EXPX(ldl) == EXPXMASK
&& ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
return 1;
ldl.ld = b;
if (EXPX(ldl) == EXPXMASK
&& ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
return 1;
return 0;
}
/* convert double to long double */
long double
__extenddfxf2 (double d)
{
register union double_long dl;
register union long_double_long ldl;
register long exp;
dl.d = d;
/*printf ("dfxf in: %g\n", d);*/
ldl.l.upper = SIGND (dl);
if ((dl.l.upper & ~SIGNBIT) == 0 && !dl.l.lower)
{
ldl.l.middle = 0;
ldl.l.lower = 0;
return ldl.ld;
}
exp = EXPD (dl) - EXCESSD + EXCESSX;
ldl.l.upper |= exp << 16;
ldl.l.middle = HIDDENX;
/* 31-20: # mantissa bits in ldl.l.middle - # mantissa bits in dl.l.upper */
ldl.l.middle |= (dl.l.upper & MANTDMASK) << (31 - 20);
/* 1+20: explicit-integer-bit + # mantissa bits in dl.l.upper */
ldl.l.middle |= dl.l.lower >> (1 + 20);
/* 32 - 21: # bits of dl.l.lower in ldl.l.middle */
ldl.l.lower = dl.l.lower << (32 - 21);
/*printf ("dfxf out: %s\n", dumpxf (ldl.ld));*/
return ldl.ld;
}
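/* Example: extending 1.0 (upper = 0x3FF00000, lower = 0) gives
   upper = 0x3FFF0000, middle = 0x80000000, lower = 0: biased exponent
   16383 with only the explicit integer bit set.  */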
/* convert long double to double */
double
__truncxfdf2 (long double ld)
{
register long exp;
register union double_long dl;
register union long_double_long ldl;
ldl.ld = ld;
/*printf ("xfdf in: %s\n", dumpxf (ld));*/
dl.l.upper = SIGNX (ldl);
if ((ldl.l.upper & ~SIGNBIT) == 0 && !ldl.l.middle && !ldl.l.lower)
{
dl.l.lower = 0;
return dl.d;
}
exp = EXPX (ldl) - EXCESSX + EXCESSD;
/* ??? quick and dirty: keep `exp' sane */
if (exp >= EXPDMASK)
exp = EXPDMASK - 1;
dl.l.upper |= exp << (32 - (EXPDBITS + 1));
/* +1-1: add one for sign bit, but take one off for explicit-integer-bit */
dl.l.upper |= (ldl.l.middle & MANTXMASK) >> (EXPDBITS + 1 - 1);
dl.l.lower = (ldl.l.middle & MANTXMASK) << (32 - (EXPDBITS + 1 - 1));
dl.l.lower |= ldl.l.lower >> (EXPDBITS + 1 - 1);
/*printf ("xfdf out: %g\n", dl.d);*/
return dl.d;
}
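/* Clamping exp to EXPDMASK - 1 means values too big for a double come
   back as large finite numbers rather than infinity, and the low 11
   extended fraction bits are dropped without rounding, in line with the
   "primitive support" caveat above.  */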
/* convert a float to a long double */
long double
__extendsfxf2 (float f)
{
long double foo = __extenddfxf2 (__extendsfdf2 (f));
return foo;
}
/* convert a long double to a float */
float
__truncxfsf2 (long double ld)
{
float foo = __truncdfsf2 (__truncxfdf2 (ld));
return foo;
}
/* convert an int to a long double */
long double
__floatsixf (long l)
{
double foo = __floatsidf (l);
return foo;
}
/* convert an unsigned int to a long double */
long double
__floatunsixf (unsigned long l)
{
double foo = __floatunsidf (l);
return foo;
}
/* convert a long double to an int */
long
__fixxfsi (long double ld)
{
long foo = __fixdfsi ((double) ld);
return foo;
}
/* The remaining routines provide crude math support by working in double precision. */
long double
__addxf3 (long double x1, long double x2)
{
return (double) x1 + (double) x2;
}
long double
__subxf3 (long double x1, long double x2)
{
return (double) x1 - (double) x2;
}
long double
__mulxf3 (long double x1, long double x2)
{
return (double) x1 * (double) x2;
}
long double
__divxf3 (long double x1, long double x2)
{
return (double) x1 / (double) x2;
}
long double
__negxf2 (long double x1)
{
return - (double) x1;
}
long
__cmpxf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__eqxf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__nexf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__ltxf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__lexf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__gtxf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
long
__gexf2 (long double x1, long double x2)
{
return __cmpdf2 ((double) x1, (double) x2);
}
#endif /* EXTFLOAT */
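
#if 0
/* Rough usage sketch, not part of the library: the compiler normally emits
   calls to these routines itself when -msoft-float is in effect, but they
   can also be exercised directly.  This assumes the !EXTFLOAT build of this
   file is linked in and that the host matches the layout assumed above
   (32-bit big-endian longs, IEEE doubles).  */
#include <stdio.h>

extern double __floatsidf (long);
extern long __fixdfsi (double);
extern float __truncdfsf2 (double);

int
main (void)
{
  double d = __floatsidf (-5L);       /* exact result: -5.0 */
  long n = __fixdfsi (-3.75);         /* truncates toward zero: -3 */
  float f = __truncdfsf2 (1.0 / 3.0); /* 1/3 rounded to single precision */

  printf ("%f %ld %f\n", d, n, (double) f);
  return 0;
}
#endif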