/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

   You may copy, modify, and redistribute this file under the terms of
   the GNU General Public License, version 2, or any later version, at
   your convenience.
 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"
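
/* Canonical quiet-NaN and infinity patterns.  Both carry the maximum
   biased exponent (0x7fff); the NaN additionally has a non-zero
   (here all-ones) mantissa, while infinity has a zero mantissa. */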

const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};

/* let's start with the easy ones */

struct fp_ext *
fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

struct fp_ext *
fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}

/* Now, the slightly harder ones */
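
/* The dyadic routines below all follow the same pattern: the special
   cases (infinities and zeroes) are dealt with first, then the
   exponents are aligned or combined and the mantissas worked on, and
   finally the result is checked for overflow and underflow. */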

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}

/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */

struct fp_ext *
fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");

	src->sign = !src->sign;
	return fp_fadd(dest, src);
}
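
/* fp_fcmp only needs the status bits that the subtraction produces, so
   the difference is computed into FPDATA->temp[1]; the destination
   operand itself is left unmodified. */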

struct fp_ext *
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}
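
/* fp_ftst likewise only exists for its effect on the status bits; the
   source operand is passed straight through and dest is unused. */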

struct fp_ext *
fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

	(void)dest;

	return src;
}
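
/* fp_fmul: the biased exponents are added (0x3ffe rather than 0x3fff
   is subtracted, leaving room for the mantissa product in [1,4) to be
   renormalized below), and the 64-bit mantissas are multiplied out to
   128 bits before being put back into 64. */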

struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
   FSGLDIV instructions.

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */
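
/* The biased exponents are subtracted, so the bias (0x3fff) is added
   back in.  The quotient of two normalized mantissas lies in (0.5, 2);
   the renormalization of the raw quotient below (fp_putmant128 plus
   the exp-- branch) takes care of quotients that come out below 1. */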

struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
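
/* fp_fsglmul/fp_fsgldiv: the single-precision variants.  Only the top
   24 bits of each mantissa take part (hence the 0xffffff00 masks),
   since FSGLMUL and FSGLDIV work with single-precision mantissas. */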

struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
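
/* For the divide below: if the dividend mantissa is not smaller than
   the divisor's, the quotient would need one bit more than fits, so
   the divisor is subtracted out first and the quotient shifted right
   one place with the leading one re-inserted; in the other branch the
   result exponent is simply decremented instead.  The low mantissa
   word only keeps the bits needed for rounding. */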

struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */
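
/* With the exponent bias used here, 0x3fff puts the value in [1,2):
   bit 63 of the mantissa is the integer bit and bits 62..0 are the
   fraction.  Each increment of the exponent moves the binary point one
   bit to the right, so at 0x401e all of m32[0] is integer and m32[1]
   is fraction, and at 0x403e the whole 64-bit mantissa is an integer;
   the case ranges below are keyed on exactly that. */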

static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		return;
	}

	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit.  (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			/* here the rounding bit is bit 31 of m32[1] */
			if (!(oldmant.m32[1] & 0x80000000))
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
	default:
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}

/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}

/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards.  The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * floor(dest / src))) */

struct fp_ext *
fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */

struct fp_ext *
fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}
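
/* Worked example (illustrative only): for dest = 7.0 and src = 2.0 the
   exact quotient is 3.5.  fp_fmod chops it to 3.0 and returns
   7.0 - 2.0*3.0 = 1.0, while fp_frem rounds it to nearest (4.0, ties
   to even) and returns 7.0 - 2.0*4.0 = -1.0. */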

struct fp_ext *
fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

struct fp_ext *
fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}
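
/* fp_fscale scales dest by 2^(integer part of src): the scale factor
   is converted to a long with rounding forced to round-to-zero and
   added straight onto the destination exponent; source exponents of
   0x400c or more are rejected as overflow before the conversion is
   even attempted. */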

struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}