| 2003-12-26  Guilhem Lavaux  <guilhem@kaffe.org>
| Source: [official-gcc.git] / gcc / config / m68k / lb1sf68.asm
| blob: 755f1af84da2be0fce7da303306dfb09a693deab
/* libgcc routines for 68000 w/o floating-point hardware.
   Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file.  (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* As a special exception, if you link this library with files
   compiled with GCC to produce an executable, this does not cause
   the resulting executable to be covered by the GNU General Public License.
   This exception does not however invalidate any other reasons why
   the executable file might be covered by the GNU General Public License.  */
/* Use this one for any 680x0; assumes no floating point hardware.
   The trailing " '" appearing on some lines is for ANSI preprocessors.  Yuk.
   Some of this code comes from MINIX, via the folks at ericsson.
   D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
41 /* These are predefined by new versions of GNU cpp. */
43 #ifndef __USER_LABEL_PREFIX__
44 #define __USER_LABEL_PREFIX__ _
45 #endif
47 #ifndef __REGISTER_PREFIX__
48 #define __REGISTER_PREFIX__
49 #endif
51 #ifndef __IMMEDIATE_PREFIX__
52 #define __IMMEDIATE_PREFIX__ #
53 #endif
55 /* ANSI concatenation macros. */
57 #define CONCAT1(a, b) CONCAT2(a, b)
58 #define CONCAT2(a, b) a ## b
60 /* Use the right prefix for global labels. */
62 #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
64 /* Use the right prefix for registers. */
66 #define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
68 /* Use the right prefix for immediate values. */
70 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
72 #define d0 REG (d0)
73 #define d1 REG (d1)
74 #define d2 REG (d2)
75 #define d3 REG (d3)
76 #define d4 REG (d4)
77 #define d5 REG (d5)
78 #define d6 REG (d6)
79 #define d7 REG (d7)
80 #define a0 REG (a0)
81 #define a1 REG (a1)
82 #define a2 REG (a2)
83 #define a3 REG (a3)
84 #define a4 REG (a4)
85 #define a5 REG (a5)
86 #define a6 REG (a6)
87 #define fp REG (fp)
88 #define sp REG (sp)
89 #define pc REG (pc)
/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5.  PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
98 #ifndef __PIC__
100 /* Non PIC (absolute/relocatable) versions */
102 .macro PICCALL addr
103 jbsr \addr
104 .endm
106 .macro PICJUMP addr
107 jmp \addr
108 .endm
110 .macro PICLEA sym, reg
111 lea \sym, \reg
112 .endm
114 .macro PICPEA sym, areg
115 pea \sym
116 .endm
118 #else /* __PIC__ */
120 /* Common for -mid-shared-libary and -msep-data */
122 .macro PICCALL addr
123 bsr \addr
124 .endm
126 .macro PICJUMP addr
127 bra \addr
128 .endm
130 # if defined(__ID_SHARED_LIBRARY__)
132 /* -mid-shared-library versions */
134 .macro PICLEA sym, reg
135 movel a5@(_current_shared_library_a5_offset_), \reg
136 movel \sym@GOT(\reg), \reg
137 .endm
139 .macro PICPEA sym, areg
140 movel a5@(_current_shared_library_a5_offset_), \areg
141 movel \sym@GOT(\areg), sp@-
142 .endm
144 # else /* !__ID_SHARED_LIBRARY__ */
146 /* Versions for -msep-data */
148 .macro PICLEA sym, reg
149 movel \sym@GOT(a5), \reg
150 .endm
152 .macro PICPEA sym, areg
153 movel \sym@GOT(a5), sp@-
154 .endm
156 # endif /* !__ID_SHARED_LIBRARY__ */
157 #endif /* __PIC__ */
160 #ifdef L_floatex
162 | This is an attempt at a decent floating point (single, double and
163 | extended double) code for the GNU C compiler. It should be easy to
164 | adapt to other compilers (but beware of the local labels!).
166 | Starting date: 21 October, 1990
168 | It is convenient to introduce the notation (s,e,f) for a floating point
169 | number, where s=sign, e=exponent, f=fraction. We will call a floating
170 | point number fpn to abbreviate, independently of the precision.
171 | Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
172 | for doubles and 16383 for long doubles). We then have the following
173 | different cases:
174 | 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
175 | (-1)^s x 1.f x 2^(e-bias-1).
176 | 2. Denormalized fpns have e=0. They correspond to numbers of the form
177 | (-1)^s x 0.f x 2^(-bias).
178 | 3. +/-INFINITY have e=MAX_EXP, f=0.
179 | 4. Quiet NaN (Not a Number) have all bits set.
180 | 5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.
182 |=============================================================================
183 | exceptions
184 |=============================================================================
186 | This is the floating point condition code register (_fpCCR):
188 | struct {
189 | short _exception_bits;
190 | short _trap_enable_bits;
191 | short _sticky_bits;
192 | short _rounding_mode;
193 | short _format;
194 | short _last_operation;
195 | union {
196 | float sf;
197 | double df;
198 | } _operand1;
199 | union {
200 | float sf;
201 | double df;
202 | } _operand2;
203 | } _fpCCR;
205 .data
206 .even
208 .globl SYM (_fpCCR)
210 SYM (_fpCCR):
211 __exception_bits:
212 .word 0
213 __trap_enable_bits:
214 .word 0
215 __sticky_bits:
216 .word 0
217 __rounding_mode:
218 .word ROUND_TO_NEAREST
219 __format:
220 .word NIL
221 __last_operation:
222 .word NOOP
223 __operand1:
224 .long 0
225 .long 0
226 __operand2:
227 .long 0
228 .long 0
230 | Offsets:
231 EBITS = __exception_bits - SYM (_fpCCR)
232 TRAPE = __trap_enable_bits - SYM (_fpCCR)
233 STICK = __sticky_bits - SYM (_fpCCR)
234 ROUND = __rounding_mode - SYM (_fpCCR)
235 FORMT = __format - SYM (_fpCCR)
236 LASTO = __last_operation - SYM (_fpCCR)
237 OPER1 = __operand1 - SYM (_fpCCR)
238 OPER2 = __operand2 - SYM (_fpCCR)
240 | The following exception types are supported:
241 INEXACT_RESULT = 0x0001
242 UNDERFLOW = 0x0002
243 OVERFLOW = 0x0004
244 DIVIDE_BY_ZERO = 0x0008
245 INVALID_OPERATION = 0x0010
247 | The allowed rounding modes are:
248 UNKNOWN = -1
249 ROUND_TO_NEAREST = 0 | round result to nearest representable value
250 ROUND_TO_ZERO = 1 | round result towards zero
251 ROUND_TO_PLUS = 2 | round result towards plus infinity
252 ROUND_TO_MINUS = 3 | round result towards minus infinity
254 | The allowed values of format are:
255 NIL = 0
256 SINGLE_FLOAT = 1
257 DOUBLE_FLOAT = 2
258 LONG_FLOAT = 3
260 | The allowed values for the last operation are:
261 NOOP = 0
262 ADD = 1
263 MULTIPLY = 2
264 DIVIDE = 3
265 NEGATE = 4
266 COMPARE = 5
267 EXTENDSFDF = 6
268 TRUNCDFSF = 7
270 |=============================================================================
271 | __clear_sticky_bits
272 |=============================================================================
274 | The sticky bits are normally not cleared (thus the name), whereas the
275 | exception type and exception value reflect the last computation.
276 | This routine is provided to clear them (you can also write to _fpCCR,
277 | since it is globally visible).
279 .globl SYM (__clear_sticky_bit)
281 .text
282 .even
284 | void __clear_sticky_bits(void);
285 SYM (__clear_sticky_bit):
286 PICLEA SYM (_fpCCR),a0
287 #ifndef __mcoldfire__
288 movew IMM (0),a0@(STICK)
289 #else
290 clr.w a0@(STICK)
291 #endif
294 |=============================================================================
295 | $_exception_handler
296 |=============================================================================
298 .globl $_exception_handler
300 .text
301 .even
303 | This is the common exit point if an exception occurs.
304 | NOTE: it is NOT callable from C!
305 | It expects the exception type in d7, the format (SINGLE_FLOAT,
306 | DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
307 | It sets the corresponding exception and sticky bits, and the format.
308 | Depending on the format if fills the corresponding slots for the
309 | operands which produced the exception (all this information is provided
310 | so if you write your own exception handlers you have enough information
311 | to deal with the problem).
312 | Then checks to see if the corresponding exception is trap-enabled,
313 | in which case it pushes the address of _fpCCR and traps through
314 | trap FPTRAP (15 for the moment).
316 FPTRAP = 15
318 $_exception_handler:
319 PICLEA SYM (_fpCCR),a0
320 movew d7,a0@(EBITS) | set __exception_bits
321 #ifndef __mcoldfire__
322 orw d7,a0@(STICK) | and __sticky_bits
323 #else
324 movew a0@(STICK),d4
325 orl d7,d4
326 movew d4,a0@(STICK)
327 #endif
328 movew d6,a0@(FORMT) | and __format
329 movew d5,a0@(LASTO) | and __last_operation
331 | Now put the operands in place:
332 #ifndef __mcoldfire__
333 cmpw IMM (SINGLE_FLOAT),d6
334 #else
335 cmpl IMM (SINGLE_FLOAT),d6
336 #endif
337 beq 1f
338 movel a6@(8),a0@(OPER1)
339 movel a6@(12),a0@(OPER1+4)
340 movel a6@(16),a0@(OPER2)
341 movel a6@(20),a0@(OPER2+4)
342 bra 2f
343 1: movel a6@(8),a0@(OPER1)
344 movel a6@(12),a0@(OPER2)
346 | And check whether the exception is trap-enabled:
347 #ifndef __mcoldfire__
348 andw a0@(TRAPE),d7 | is exception trap-enabled?
349 #else
350 clrl d6
351 movew a0@(TRAPE),d6
352 andl d6,d7
353 #endif
354 beq 1f | no, exit
355 PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
356 trap IMM (FPTRAP) | and trap
357 #ifndef __mcoldfire__
358 1: moveml sp@+,d2-d7 | restore data registers
359 #else
360 1: moveml sp@,d2-d7
361 | XXX if frame pointer is ever removed, stack pointer must
362 | be adjusted here.
363 #endif
364 unlk a6 | and return
366 #endif /* L_floatex */
368 #ifdef L_mulsi3
369 .text
370 .proc
371 .globl SYM (__mulsi3)
372 SYM (__mulsi3):
373 movew sp@(4), d0 /* x0 -> d0 */
374 muluw sp@(10), d0 /* x0*y1 */
375 movew sp@(6), d1 /* x1 -> d1 */
376 muluw sp@(8), d1 /* x1*y0 */
377 #ifndef __mcoldfire__
378 addw d1, d0
379 #else
380 addl d1, d0
381 #endif
382 swap d0
383 clrw d0
384 movew sp@(6), d1 /* x1 -> d1 */
385 muluw sp@(10), d1 /* x1*y1 */
386 addl d1, d0
389 #endif /* L_mulsi3 */
391 #ifdef L_udivsi3
392 .text
393 .proc
394 .globl SYM (__udivsi3)
395 SYM (__udivsi3):
396 #ifndef __mcoldfire__
397 movel d2, sp@-
398 movel sp@(12), d1 /* d1 = divisor */
399 movel sp@(8), d0 /* d0 = dividend */
401 cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
402 jcc L3 /* then try next algorithm */
403 movel d0, d2
404 clrw d2
405 swap d2
406 divu d1, d2 /* high quotient in lower word */
407 movew d2, d0 /* save high quotient */
408 swap d0
409 movew sp@(10), d2 /* get low dividend + high rest */
410 divu d1, d2 /* low quotient */
411 movew d2, d0
412 jra L6
414 L3: movel d1, d2 /* use d2 as divisor backup */
415 L4: lsrl IMM (1), d1 /* shift divisor */
416 lsrl IMM (1), d0 /* shift dividend */
417 cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
418 jcc L4
419 divu d1, d0 /* now we have 16-bit divisor */
420 andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
422 /* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
423 the operand ranges, this might give a 33-bit product. If this product is
424 greater than the dividend, the tentative quotient was too large. */
425 movel d2, d1
426 mulu d0, d1 /* low part, 32 bits */
427 swap d2
428 mulu d0, d2 /* high part, at most 17 bits */
429 swap d2 /* align high part with low part */
430 tstw d2 /* high part 17 bits? */
431 jne L5 /* if 17 bits, quotient was too large */
432 addl d2, d1 /* add parts */
433 jcs L5 /* if sum is 33 bits, quotient was too large */
434 cmpl sp@(8), d1 /* compare the sum with the dividend */
435 jls L6 /* if sum > dividend, quotient was too large */
436 L5: subql IMM (1), d0 /* adjust quotient */
438 L6: movel sp@+, d2
441 #else /* __mcoldfire__ */
443 /* ColdFire implementation of non-restoring division algorithm from
444 Hennessy & Patterson, Appendix A. */
445 link a6,IMM (-12)
446 moveml d2-d4,sp@
447 movel a6@(8),d0
448 movel a6@(12),d1
449 clrl d2 | clear p
450 moveq IMM (31),d4
451 L1: addl d0,d0 | shift reg pair (p,a) one bit left
452 addxl d2,d2
453 movl d2,d3 | subtract b from p, store in tmp.
454 subl d1,d3
455 jcs L2 | if no carry,
456 bset IMM (0),d0 | set the low order bit of a to 1,
457 movl d3,d2 | and store tmp in p.
458 L2: subql IMM (1),d4
459 jcc L1
460 moveml sp@,d2-d4 | restore data registers
461 unlk a6 | and return
463 #endif /* __mcoldfire__ */
465 #endif /* L_udivsi3 */
467 #ifdef L_divsi3
468 .text
469 .proc
470 .globl SYM (__divsi3)
471 SYM (__divsi3):
472 movel d2, sp@-
474 moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
475 movel sp@(12), d1 /* d1 = divisor */
476 jpl L1
477 negl d1
478 #ifndef __mcoldfire__
479 negb d2 /* change sign because divisor <0 */
480 #else
481 negl d2 /* change sign because divisor <0 */
482 #endif
483 L1: movel sp@(8), d0 /* d0 = dividend */
484 jpl L2
485 negl d0
486 #ifndef __mcoldfire__
487 negb d2
488 #else
489 negl d2
490 #endif
492 L2: movel d1, sp@-
493 movel d0, sp@-
494 PICCALL SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
495 addql IMM (8), sp
497 tstb d2
498 jpl L3
499 negl d0
501 L3: movel sp@+, d2
503 #endif /* L_divsi3 */
505 #ifdef L_umodsi3
506 .text
507 .proc
508 .globl SYM (__umodsi3)
509 SYM (__umodsi3):
510 movel sp@(8), d1 /* d1 = divisor */
511 movel sp@(4), d0 /* d0 = dividend */
512 movel d1, sp@-
513 movel d0, sp@-
514 PICCALL SYM (__udivsi3)
515 addql IMM (8), sp
516 movel sp@(8), d1 /* d1 = divisor */
517 #ifndef __mcoldfire__
518 movel d1, sp@-
519 movel d0, sp@-
520 PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
521 addql IMM (8), sp
522 #else
523 mulsl d1,d0
524 #endif
525 movel sp@(4), d1 /* d1 = dividend */
526 subl d0, d1 /* d1 = a - (a/b)*b */
527 movel d1, d0
529 #endif /* L_umodsi3 */
531 #ifdef L_modsi3
532 .text
533 .proc
534 .globl SYM (__modsi3)
535 SYM (__modsi3):
536 movel sp@(8), d1 /* d1 = divisor */
537 movel sp@(4), d0 /* d0 = dividend */
538 movel d1, sp@-
539 movel d0, sp@-
540 PICCALL SYM (__divsi3)
541 addql IMM (8), sp
542 movel sp@(8), d1 /* d1 = divisor */
543 #ifndef __mcoldfire__
544 movel d1, sp@-
545 movel d0, sp@-
546 PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
547 addql IMM (8), sp
548 #else
549 mulsl d1,d0
550 #endif
551 movel sp@(4), d1 /* d1 = dividend */
552 subl d0, d1 /* d1 = a - (a/b)*b */
553 movel d1, d0
555 #endif /* L_modsi3 */
558 #ifdef L_double
560 .globl SYM (_fpCCR)
561 .globl $_exception_handler
563 QUIET_NaN = 0xffffffff
565 D_MAX_EXP = 0x07ff
566 D_BIAS = 1022
567 DBL_MAX_EXP = D_MAX_EXP - D_BIAS
568 DBL_MIN_EXP = 1 - D_BIAS
569 DBL_MANT_DIG = 53
571 INEXACT_RESULT = 0x0001
572 UNDERFLOW = 0x0002
573 OVERFLOW = 0x0004
574 DIVIDE_BY_ZERO = 0x0008
575 INVALID_OPERATION = 0x0010
577 DOUBLE_FLOAT = 2
579 NOOP = 0
580 ADD = 1
581 MULTIPLY = 2
582 DIVIDE = 3
583 NEGATE = 4
584 COMPARE = 5
585 EXTENDSFDF = 6
586 TRUNCDFSF = 7
588 UNKNOWN = -1
589 ROUND_TO_NEAREST = 0 | round result to nearest representable value
590 ROUND_TO_ZERO = 1 | round result towards zero
591 ROUND_TO_PLUS = 2 | round result towards plus infinity
592 ROUND_TO_MINUS = 3 | round result towards minus infinity
594 | Entry points:
596 .globl SYM (__adddf3)
597 .globl SYM (__subdf3)
598 .globl SYM (__muldf3)
599 .globl SYM (__divdf3)
600 .globl SYM (__negdf2)
601 .globl SYM (__cmpdf2)
603 .text
604 .even
606 | These are common routines to return and signal exceptions.
608 Ld$den:
609 | Return and signal a denormalized number
610 orl d7,d0
611 movew IMM (INEXACT_RESULT+UNDERFLOW),d7
612 moveq IMM (DOUBLE_FLOAT),d6
613 PICJUMP $_exception_handler
615 Ld$infty:
616 Ld$overflow:
617 | Return a properly signed INFINITY and set the exception flags
618 movel IMM (0x7ff00000),d0
619 movel IMM (0),d1
620 orl d7,d0
621 movew IMM (INEXACT_RESULT+OVERFLOW),d7
622 moveq IMM (DOUBLE_FLOAT),d6
623 PICJUMP $_exception_handler
625 Ld$underflow:
626 | Return 0 and set the exception flags
627 movel IMM (0),d0
628 movel d0,d1
629 movew IMM (INEXACT_RESULT+UNDERFLOW),d7
630 moveq IMM (DOUBLE_FLOAT),d6
631 PICJUMP $_exception_handler
633 Ld$inop:
634 | Return a quiet NaN and set the exception flags
635 movel IMM (QUIET_NaN),d0
636 movel d0,d1
637 movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
638 moveq IMM (DOUBLE_FLOAT),d6
639 PICJUMP $_exception_handler
641 Ld$div$0:
642 | Return a properly signed INFINITY and set the exception flags
643 movel IMM (0x7ff00000),d0
644 movel IMM (0),d1
645 orl d7,d0
646 movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
647 moveq IMM (DOUBLE_FLOAT),d6
648 PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
|                         double precision routines
|=============================================================================
|=============================================================================

| A double precision floating point number (double) has the format:
|
| struct _double {
|  unsigned int sign      : 1;  /* sign bit */
|  unsigned int exponent  :11;  /* exponent, shifted by 126 */
|  unsigned int fraction  :52;  /* fraction */
| } double;
|
| Thus sizeof(double) = 8 (64 bits).
|
| All the routines are callable from C programs, and return the result
| in the register pair d0-d1.  They also preserve all registers except
| d0-d1 and a0-a1.
670 |=============================================================================
671 | __subdf3
672 |=============================================================================
674 | double __subdf3(double, double);
675 SYM (__subdf3):
676 bchg IMM (31),sp@(12) | change sign of second operand
677 | and fall through, so we always add
678 |=============================================================================
679 | __adddf3
680 |=============================================================================
682 | double __adddf3(double, double);
683 SYM (__adddf3):
684 #ifndef __mcoldfire__
685 link a6,IMM (0) | everything will be done in registers
686 moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
687 #else
688 link a6,IMM (-24)
689 moveml d2-d7,sp@
690 #endif
691 movel a6@(8),d0 | get first operand
692 movel a6@(12),d1 |
693 movel a6@(16),d2 | get second operand
694 movel a6@(20),d3 |
696 movel d0,d7 | get d0's sign bit in d7 '
697 addl d1,d1 | check and clear sign bit of a, and gain one
698 addxl d0,d0 | bit of extra precision
699 beq Ladddf$b | if zero return second operand
701 movel d2,d6 | save sign in d6
702 addl d3,d3 | get rid of sign bit and gain one bit of
703 addxl d2,d2 | extra precision
704 beq Ladddf$a | if zero return first operand
706 andl IMM (0x80000000),d7 | isolate a's sign bit '
707 swap d6 | and also b's sign bit '
708 #ifndef __mcoldfire__
709 andw IMM (0x8000),d6 |
710 orw d6,d7 | and combine them into d7, so that a's sign '
711 | bit is in the high word and b's is in the '
712 | low word, so d6 is free to be used
713 #else
714 andl IMM (0x8000),d6
715 orl d6,d7
716 #endif
717 movel d7,a0 | now save d7 into a0, so d7 is free to
718 | be used also
720 | Get the exponents and check for denormalized and/or infinity.
722 movel IMM (0x001fffff),d6 | mask for the fraction
723 movel IMM (0x00200000),d7 | mask to put hidden bit back
725 movel d0,d4 |
726 andl d6,d0 | get fraction in d0
727 notl d6 | make d6 into mask for the exponent
728 andl d6,d4 | get exponent in d4
729 beq Ladddf$a$den | branch if a is denormalized
730 cmpl d6,d4 | check for INFINITY or NaN
731 beq Ladddf$nf |
732 orl d7,d0 | and put hidden bit back
733 Ladddf$1:
734 swap d4 | shift right exponent so that it starts
735 #ifndef __mcoldfire__
736 lsrw IMM (5),d4 | in bit 0 and not bit 20
737 #else
738 lsrl IMM (5),d4 | in bit 0 and not bit 20
739 #endif
740 | Now we have a's exponent in d4 and fraction in d0-d1 '
741 movel d2,d5 | save b to get exponent
742 andl d6,d5 | get exponent in d5
743 beq Ladddf$b$den | branch if b is denormalized
744 cmpl d6,d5 | check for INFINITY or NaN
745 beq Ladddf$nf
746 notl d6 | make d6 into mask for the fraction again
747 andl d6,d2 | and get fraction in d2
748 orl d7,d2 | and put hidden bit back
749 Ladddf$2:
750 swap d5 | shift right exponent so that it starts
751 #ifndef __mcoldfire__
752 lsrw IMM (5),d5 | in bit 0 and not bit 20
753 #else
754 lsrl IMM (5),d5 | in bit 0 and not bit 20
755 #endif
757 | Now we have b's exponent in d5 and fraction in d2-d3. '
759 | The situation now is as follows: the signs are combined in a0, the
760 | numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
761 | and d5 (b). To do the rounding correctly we need to keep all the
762 | bits until the end, so we need to use d0-d1-d2-d3 for the first number
763 | and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
764 | exponents in a2-a3.
766 #ifndef __mcoldfire__
767 moveml a2-a3,sp@- | save the address registers
768 #else
769 movel a2,sp@-
770 movel a3,sp@-
771 movel a4,sp@-
772 #endif
774 movel d4,a2 | save the exponents
775 movel d5,a3 |
777 movel IMM (0),d7 | and move the numbers around
778 movel d7,d6 |
779 movel d3,d5 |
780 movel d2,d4 |
781 movel d7,d3 |
782 movel d7,d2 |
784 | Here we shift the numbers until the exponents are the same, and put
785 | the largest exponent in a2.
786 #ifndef __mcoldfire__
787 exg d4,a2 | get exponents back
788 exg d5,a3 |
789 cmpw d4,d5 | compare the exponents
790 #else
791 movel d4,a4 | get exponents back
792 movel a2,d4
793 movel a4,a2
794 movel d5,a4
795 movel a3,d5
796 movel a4,a3
797 cmpl d4,d5 | compare the exponents
798 #endif
799 beq Ladddf$3 | if equal don't shift '
800 bhi 9f | branch if second exponent is higher
802 | Here we have a's exponent larger than b's, so we have to shift b. We do
803 | this by using as counter d2:
804 1: movew d4,d2 | move largest exponent to d2
805 #ifndef __mcoldfire__
806 subw d5,d2 | and subtract second exponent
807 exg d4,a2 | get back the longs we saved
808 exg d5,a3 |
809 #else
810 subl d5,d2 | and subtract second exponent
811 movel d4,a4 | get back the longs we saved
812 movel a2,d4
813 movel a4,a2
814 movel d5,a4
815 movel a3,d5
816 movel a4,a3
817 #endif
818 | if difference is too large we don't shift (actually, we can just exit) '
819 #ifndef __mcoldfire__
820 cmpw IMM (DBL_MANT_DIG+2),d2
821 #else
822 cmpl IMM (DBL_MANT_DIG+2),d2
823 #endif
824 bge Ladddf$b$small
825 #ifndef __mcoldfire__
826 cmpw IMM (32),d2 | if difference >= 32, shift by longs
827 #else
828 cmpl IMM (32),d2 | if difference >= 32, shift by longs
829 #endif
830 bge 5f
832 #ifndef __mcoldfire__
833 cmpw IMM (16),d2 | if difference >= 16, shift by words
834 #else
835 cmpl IMM (16),d2 | if difference >= 16, shift by words
836 #endif
837 bge 6f
838 bra 3f | enter dbra loop
841 #ifndef __mcoldfire__
842 lsrl IMM (1),d4
843 roxrl IMM (1),d5
844 roxrl IMM (1),d6
845 roxrl IMM (1),d7
846 #else
847 lsrl IMM (1),d7
848 btst IMM (0),d6
849 beq 10f
850 bset IMM (31),d7
851 10: lsrl IMM (1),d6
852 btst IMM (0),d5
853 beq 11f
854 bset IMM (31),d6
855 11: lsrl IMM (1),d5
856 btst IMM (0),d4
857 beq 12f
858 bset IMM (31),d5
859 12: lsrl IMM (1),d4
860 #endif
862 #ifndef __mcoldfire__
863 dbra d2,4b
864 #else
865 subql IMM (1),d2
866 bpl 4b
867 #endif
868 movel IMM (0),d2
869 movel d2,d3
870 bra Ladddf$4
872 movel d6,d7
873 movel d5,d6
874 movel d4,d5
875 movel IMM (0),d4
876 #ifndef __mcoldfire__
877 subw IMM (32),d2
878 #else
879 subl IMM (32),d2
880 #endif
881 bra 2b
883 movew d6,d7
884 swap d7
885 movew d5,d6
886 swap d6
887 movew d4,d5
888 swap d5
889 movew IMM (0),d4
890 swap d4
891 #ifndef __mcoldfire__
892 subw IMM (16),d2
893 #else
894 subl IMM (16),d2
895 #endif
896 bra 3b
899 #ifndef __mcoldfire__
900 exg d4,d5
901 movew d4,d6
902 subw d5,d6 | keep d5 (largest exponent) in d4
903 exg d4,a2
904 exg d5,a3
905 #else
906 movel d5,d6
907 movel d4,d5
908 movel d6,d4
909 subl d5,d6
910 movel d4,a4
911 movel a2,d4
912 movel a4,a2
913 movel d5,a4
914 movel a3,d5
915 movel a4,a3
916 #endif
917 | if difference is too large we don't shift (actually, we can just exit) '
918 #ifndef __mcoldfire__
919 cmpw IMM (DBL_MANT_DIG+2),d6
920 #else
921 cmpl IMM (DBL_MANT_DIG+2),d6
922 #endif
923 bge Ladddf$a$small
924 #ifndef __mcoldfire__
925 cmpw IMM (32),d6 | if difference >= 32, shift by longs
926 #else
927 cmpl IMM (32),d6 | if difference >= 32, shift by longs
928 #endif
929 bge 5f
931 #ifndef __mcoldfire__
932 cmpw IMM (16),d6 | if difference >= 16, shift by words
933 #else
934 cmpl IMM (16),d6 | if difference >= 16, shift by words
935 #endif
936 bge 6f
937 bra 3f | enter dbra loop
940 #ifndef __mcoldfire__
941 lsrl IMM (1),d0
942 roxrl IMM (1),d1
943 roxrl IMM (1),d2
944 roxrl IMM (1),d3
945 #else
946 lsrl IMM (1),d3
947 btst IMM (0),d2
948 beq 10f
949 bset IMM (31),d3
950 10: lsrl IMM (1),d2
951 btst IMM (0),d1
952 beq 11f
953 bset IMM (31),d2
954 11: lsrl IMM (1),d1
955 btst IMM (0),d0
956 beq 12f
957 bset IMM (31),d1
958 12: lsrl IMM (1),d0
959 #endif
961 #ifndef __mcoldfire__
962 dbra d6,4b
963 #else
964 subql IMM (1),d6
965 bpl 4b
966 #endif
967 movel IMM (0),d7
968 movel d7,d6
969 bra Ladddf$4
971 movel d2,d3
972 movel d1,d2
973 movel d0,d1
974 movel IMM (0),d0
975 #ifndef __mcoldfire__
976 subw IMM (32),d6
977 #else
978 subl IMM (32),d6
979 #endif
980 bra 2b
982 movew d2,d3
983 swap d3
984 movew d1,d2
985 swap d2
986 movew d0,d1
987 swap d1
988 movew IMM (0),d0
989 swap d0
990 #ifndef __mcoldfire__
991 subw IMM (16),d6
992 #else
993 subl IMM (16),d6
994 #endif
995 bra 3b
996 Ladddf$3:
997 #ifndef __mcoldfire__
998 exg d4,a2
999 exg d5,a3
1000 #else
1001 movel d4,a4
1002 movel a2,d4
1003 movel a4,a2
1004 movel d5,a4
1005 movel a3,d5
1006 movel a4,a3
1007 #endif
1008 Ladddf$4:
1009 | Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
1010 | the signs in a4.
1012 | Here we have to decide whether to add or subtract the numbers:
1013 #ifndef __mcoldfire__
1014 exg d7,a0 | get the signs
1015 exg d6,a3 | a3 is free to be used
1016 #else
1017 movel d7,a4
1018 movel a0,d7
1019 movel a4,a0
1020 movel d6,a4
1021 movel a3,d6
1022 movel a4,a3
1023 #endif
1024 movel d7,d6 |
1025 movew IMM (0),d7 | get a's sign in d7 '
1026 swap d6 |
1027 movew IMM (0),d6 | and b's sign in d6 '
1028 eorl d7,d6 | compare the signs
1029 bmi Lsubdf$0 | if the signs are different we have
1030 | to subtract
1031 #ifndef __mcoldfire__
1032 exg d7,a0 | else we add the numbers
1033 exg d6,a3 |
1034 #else
1035 movel d7,a4
1036 movel a0,d7
1037 movel a4,a0
1038 movel d6,a4
1039 movel a3,d6
1040 movel a4,a3
1041 #endif
1042 addl d7,d3 |
1043 addxl d6,d2 |
1044 addxl d5,d1 |
1045 addxl d4,d0 |
1047 movel a2,d4 | return exponent to d4
1048 movel a0,d7 |
1049 andl IMM (0x80000000),d7 | d7 now has the sign
1051 #ifndef __mcoldfire__
1052 moveml sp@+,a2-a3
1053 #else
1054 movel sp@+,a4
1055 movel sp@+,a3
1056 movel sp@+,a2
1057 #endif
1059 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1060 | the case of denormalized numbers in the rounding routine itself).
1061 | As in the addition (not in the subtraction!) we could have set
1062 | one more bit we check this:
1063 btst IMM (DBL_MANT_DIG+1),d0
1064 beq 1f
1065 #ifndef __mcoldfire__
1066 lsrl IMM (1),d0
1067 roxrl IMM (1),d1
1068 roxrl IMM (1),d2
1069 roxrl IMM (1),d3
1070 addw IMM (1),d4
1071 #else
1072 lsrl IMM (1),d3
1073 btst IMM (0),d2
1074 beq 10f
1075 bset IMM (31),d3
1076 10: lsrl IMM (1),d2
1077 btst IMM (0),d1
1078 beq 11f
1079 bset IMM (31),d2
1080 11: lsrl IMM (1),d1
1081 btst IMM (0),d0
1082 beq 12f
1083 bset IMM (31),d1
1084 12: lsrl IMM (1),d0
1085 addl IMM (1),d4
1086 #endif
1088 lea pc@(Ladddf$5),a0 | to return from rounding routine
1089 PICLEA SYM (_fpCCR),a1 | check the rounding mode
1090 #ifdef __mcoldfire__
1091 clrl d6
1092 #endif
1093 movew a1@(6),d6 | rounding mode in d6
1094 beq Lround$to$nearest
1095 #ifndef __mcoldfire__
1096 cmpw IMM (ROUND_TO_PLUS),d6
1097 #else
1098 cmpl IMM (ROUND_TO_PLUS),d6
1099 #endif
1100 bhi Lround$to$minus
1101 blt Lround$to$zero
1102 bra Lround$to$plus
1103 Ladddf$5:
1104 | Put back the exponent and check for overflow
1105 #ifndef __mcoldfire__
1106 cmpw IMM (0x7ff),d4 | is the exponent big?
1107 #else
1108 cmpl IMM (0x7ff),d4 | is the exponent big?
1109 #endif
1110 bge 1f
1111 bclr IMM (DBL_MANT_DIG-1),d0
1112 #ifndef __mcoldfire__
1113 lslw IMM (4),d4 | put exponent back into position
1114 #else
1115 lsll IMM (4),d4 | put exponent back into position
1116 #endif
1117 swap d0 |
1118 #ifndef __mcoldfire__
1119 orw d4,d0 |
1120 #else
1121 orl d4,d0 |
1122 #endif
1123 swap d0 |
1124 bra Ladddf$ret
1126 movew IMM (ADD),d5
1127 bra Ld$overflow
1129 Lsubdf$0:
1130 | Here we do the subtraction.
1131 #ifndef __mcoldfire__
1132 exg d7,a0 | put sign back in a0
1133 exg d6,a3 |
1134 #else
1135 movel d7,a4
1136 movel a0,d7
1137 movel a4,a0
1138 movel d6,a4
1139 movel a3,d6
1140 movel a4,a3
1141 #endif
1142 subl d7,d3 |
1143 subxl d6,d2 |
1144 subxl d5,d1 |
1145 subxl d4,d0 |
1146 beq Ladddf$ret$1 | if zero just exit
1147 bpl 1f | if positive skip the following
1148 movel a0,d7 |
1149 bchg IMM (31),d7 | change sign bit in d7
1150 movel d7,a0 |
1151 negl d3 |
1152 negxl d2 |
1153 negxl d1 | and negate result
1154 negxl d0 |
1156 movel a2,d4 | return exponent to d4
1157 movel a0,d7
1158 andl IMM (0x80000000),d7 | isolate sign bit
1159 #ifndef __mcoldfire__
1160 moveml sp@+,a2-a3 |
1161 #else
1162 movel sp@+,a4
1163 movel sp@+,a3
1164 movel sp@+,a2
1165 #endif
1167 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1168 | the case of denormalized numbers in the rounding routine itself).
1169 | As in the addition (not in the subtraction!) we could have set
1170 | one more bit we check this:
1171 btst IMM (DBL_MANT_DIG+1),d0
1172 beq 1f
1173 #ifndef __mcoldfire__
1174 lsrl IMM (1),d0
1175 roxrl IMM (1),d1
1176 roxrl IMM (1),d2
1177 roxrl IMM (1),d3
1178 addw IMM (1),d4
1179 #else
1180 lsrl IMM (1),d3
1181 btst IMM (0),d2
1182 beq 10f
1183 bset IMM (31),d3
1184 10: lsrl IMM (1),d2
1185 btst IMM (0),d1
1186 beq 11f
1187 bset IMM (31),d2
1188 11: lsrl IMM (1),d1
1189 btst IMM (0),d0
1190 beq 12f
1191 bset IMM (31),d1
1192 12: lsrl IMM (1),d0
1193 addl IMM (1),d4
1194 #endif
1196 lea pc@(Lsubdf$1),a0 | to return from rounding routine
1197 PICLEA SYM (_fpCCR),a1 | check the rounding mode
1198 #ifdef __mcoldfire__
1199 clrl d6
1200 #endif
1201 movew a1@(6),d6 | rounding mode in d6
1202 beq Lround$to$nearest
1203 #ifndef __mcoldfire__
1204 cmpw IMM (ROUND_TO_PLUS),d6
1205 #else
1206 cmpl IMM (ROUND_TO_PLUS),d6
1207 #endif
1208 bhi Lround$to$minus
1209 blt Lround$to$zero
1210 bra Lround$to$plus
1211 Lsubdf$1:
1212 | Put back the exponent and sign (we don't have overflow). '
1213 bclr IMM (DBL_MANT_DIG-1),d0
1214 #ifndef __mcoldfire__
1215 lslw IMM (4),d4 | put exponent back into position
1216 #else
1217 lsll IMM (4),d4 | put exponent back into position
1218 #endif
1219 swap d0 |
1220 #ifndef __mcoldfire__
1221 orw d4,d0 |
1222 #else
1223 orl d4,d0 |
1224 #endif
1225 swap d0 |
1226 bra Ladddf$ret
1228 | If one of the numbers was too small (difference of exponents >=
1229 | DBL_MANT_DIG+1) we return the other (and now we don't have to '
1230 | check for finiteness or zero).
1231 Ladddf$a$small:
1232 #ifndef __mcoldfire__
1233 moveml sp@+,a2-a3
1234 #else
1235 movel sp@+,a4
1236 movel sp@+,a3
1237 movel sp@+,a2
1238 #endif
1239 movel a6@(16),d0
1240 movel a6@(20),d1
1241 PICLEA SYM (_fpCCR),a0
1242 movew IMM (0),a0@
1243 #ifndef __mcoldfire__
1244 moveml sp@+,d2-d7 | restore data registers
1245 #else
1246 moveml sp@,d2-d7
1247 | XXX if frame pointer is ever removed, stack pointer must
1248 | be adjusted here.
1249 #endif
1250 unlk a6 | and return
1253 Ladddf$b$small:
1254 #ifndef __mcoldfire__
1255 moveml sp@+,a2-a3
1256 #else
1257 movel sp@+,a4
1258 movel sp@+,a3
1259 movel sp@+,a2
1260 #endif
1261 movel a6@(8),d0
1262 movel a6@(12),d1
1263 PICLEA SYM (_fpCCR),a0
1264 movew IMM (0),a0@
1265 #ifndef __mcoldfire__
1266 moveml sp@+,d2-d7 | restore data registers
1267 #else
1268 moveml sp@,d2-d7
1269 | XXX if frame pointer is ever removed, stack pointer must
1270 | be adjusted here.
1271 #endif
1272 unlk a6 | and return
1275 Ladddf$a$den:
1276 movel d7,d4 | d7 contains 0x00200000
1277 bra Ladddf$1
1279 Ladddf$b$den:
1280 movel d7,d5 | d7 contains 0x00200000
1281 notl d6
1282 bra Ladddf$2
1284 Ladddf$b:
1285 | Return b (if a is zero)
1286 movel d2,d0
1287 movel d3,d1
1288 bra 1f
1289 Ladddf$a:
1290 movel a6@(8),d0
1291 movel a6@(12),d1
1293 movew IMM (ADD),d5
1294 | Check for NaN and +/-INFINITY.
1295 movel d0,d7 |
1296 andl IMM (0x80000000),d7 |
1297 bclr IMM (31),d0 |
1298 cmpl IMM (0x7ff00000),d0 |
1299 bge 2f |
1300 movel d0,d0 | check for zero, since we don't '
1301 bne Ladddf$ret | want to return -0 by mistake
1302 bclr IMM (31),d7 |
1303 bra Ladddf$ret |
1305 andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
1306 orl d1,d0 |
1307 bne Ld$inop |
1308 bra Ld$infty |
1310 Ladddf$ret$1:
1311 #ifndef __mcoldfire__
1312 moveml sp@+,a2-a3 | restore regs and exit
1313 #else
1314 movel sp@+,a4
1315 movel sp@+,a3
1316 movel sp@+,a2
1317 #endif
1319 Ladddf$ret:
1320 | Normal exit.
1321 PICLEA SYM (_fpCCR),a0
1322 movew IMM (0),a0@
1323 orl d7,d0 | put sign bit back
1324 #ifndef __mcoldfire__
1325 moveml sp@+,d2-d7
1326 #else
1327 moveml sp@,d2-d7
1328 | XXX if frame pointer is ever removed, stack pointer must
1329 | be adjusted here.
1330 #endif
1331 unlk a6
1334 Ladddf$ret$den:
1335 | Return a denormalized number.
1336 #ifndef __mcoldfire__
1337 lsrl IMM (1),d0 | shift right once more
1338 roxrl IMM (1),d1 |
1339 #else
1340 lsrl IMM (1),d1
1341 btst IMM (0),d0
1342 beq 10f
1343 bset IMM (31),d1
1344 10: lsrl IMM (1),d0
1345 #endif
1346 bra Ladddf$ret
1348 Ladddf$nf:
1349 movew IMM (ADD),d5
1350 | This could be faster but it is not worth the effort, since it is not
1351 | executed very often. We sacrifice speed for clarity here.
1352 movel a6@(8),d0 | get the numbers back (remember that we
1353 movel a6@(12),d1 | did some processing already)
1354 movel a6@(16),d2 |
1355 movel a6@(20),d3 |
1356 movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
1357 movel d0,d7 | save sign bits
1358 movel d2,d6 |
1359 bclr IMM (31),d0 | clear sign bits
1360 bclr IMM (31),d2 |
1361 | We know that one of them is either NaN of +/-INFINITY
1362 | Check for NaN (if either one is NaN return NaN)
1363 cmpl d4,d0 | check first a (d0)
1364 bhi Ld$inop | if d0 > 0x7ff00000 or equal and
1365 bne 2f
1366 tstl d1 | d1 > 0, a is NaN
1367 bne Ld$inop |
1368 2: cmpl d4,d2 | check now b (d1)
1369 bhi Ld$inop |
1370 bne 3f
1371 tstl d3 |
1372 bne Ld$inop |
1374 | Now comes the check for +/-INFINITY. We know that both are (maybe not
1375 | finite) numbers, but we have to check if both are infinite whether we
1376 | are adding or subtracting them.
1377 eorl d7,d6 | to check sign bits
1378 bmi 1f
1379 andl IMM (0x80000000),d7 | get (common) sign bit
1380 bra Ld$infty
1382 | We know one (or both) are infinite, so we test for equality between the
1383 | two numbers (if they are equal they have to be infinite both, so we
1384 | return NaN).
1385 cmpl d2,d0 | are both infinite?
1386 bne 1f | if d0 <> d2 they are not equal
1387 cmpl d3,d1 | if d0 == d2 test d3 and d1
1388 beq Ld$inop | if equal return NaN
1390 andl IMM (0x80000000),d7 | get a's sign bit '
1391 cmpl d4,d0 | test now for infinity
1392 beq Ld$infty | if a is INFINITY return with this sign
1393 bchg IMM (31),d7 | else we know b is INFINITY and has
1394 bra Ld$infty | the opposite sign
1396 |=============================================================================
1397 | __muldf3
1398 |=============================================================================
1400 | double __muldf3(double, double);
1401 SYM (__muldf3):
| Double-precision multiply entry: load a into d0-d1 and b into d2-d3,
| compute the product's sign (a0), screen out zero/NaN/infinity operands,
| then isolate both exponents (d4/d5, denormals via Lmuldf$[ab]$den) and
| both fractions with the hidden bit restored, ready for the shift-add loop.
1402 #ifndef __mcoldfire__
1403 link a6,IMM (0)
1404 moveml d2-d7,sp@-
1405 #else
1406 link a6,IMM (-24)
1407 moveml d2-d7,sp@
1408 #endif
1409 movel a6@(8),d0 | get a into d0-d1
1410 movel a6@(12),d1 |
1411 movel a6@(16),d2 | and b into d2-d3
1412 movel a6@(20),d3 |
1413 movel d0,d7 | d7 will hold the sign of the product
1414 eorl d2,d7 |
1415 andl IMM (0x80000000),d7 |
1416 movel d7,a0 | save sign bit into a0
1417 movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
1418 movel d7,d6 | another (mask for fraction)
1419 notl d6 |
1420 bclr IMM (31),d0 | get rid of a's sign bit '
1421 movel d0,d4 |
1422 orl d1,d4 |
1423 beq Lmuldf$a$0 | branch if a is zero
1424 movel d0,d4 |
1425 bclr IMM (31),d2 | get rid of b's sign bit '
1426 movel d2,d5 |
1427 orl d3,d5 |
1428 beq Lmuldf$b$0 | branch if b is zero
1429 movel d2,d5 |
1430 cmpl d7,d0 | is a big?
1431 bhi Lmuldf$inop | if a is NaN return NaN
1432 beq Lmuldf$a$nf | we still have to check d1 and b ...
1433 cmpl d7,d2 | now compare b with INFINITY
1434 bhi Lmuldf$inop | is b NaN?
1435 beq Lmuldf$b$nf | we still have to check d3 ...
1436 | Here we have both numbers finite and nonzero (and with no sign bit).
1437 | Now we get the exponents into d4 and d5.
1438 andl d7,d4 | isolate exponent in d4
1439 beq Lmuldf$a$den | if exponent zero, have denormalized
1440 andl d6,d0 | isolate fraction
1441 orl IMM (0x00100000),d0 | and put hidden bit back
1442 swap d4 | I like exponents in the first byte
1443 #ifndef __mcoldfire__
1444 lsrw IMM (4),d4 |
1445 #else
1446 lsrl IMM (4),d4 |
1447 #endif
1448 Lmuldf$1:
1449 andl d7,d5 |
1450 beq Lmuldf$b$den |
1451 andl d6,d2 |
1452 orl IMM (0x00100000),d2 | and put hidden bit back
1453 swap d5 |
1454 #ifndef __mcoldfire__
1455 lsrw IMM (4),d5 |
1456 #else
1457 lsrl IMM (4),d5 |
1458 #endif
1459 Lmuldf$2: |
1460 #ifndef __mcoldfire__
1461 addw d5,d4 | add exponents
1462 subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
1463 #else
1464 addl d5,d4 | add exponents
1465 subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
1466 #endif
1468 | We are now ready to do the multiplication. The situation is as follows:
1469 | both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
1470 | denormalized to start with!), which means that in the product bit 104
1471 | (which will correspond to bit 8 of the fourth long) is set.
1473 | Here we have to do the product.
1474 | To do it we have to juggle the registers back and forth, as there are not
1475 | enough to keep everything in them. So we use the address registers to keep
1476 | some intermediate data.
1478 #ifndef __mcoldfire__
1479 moveml a2-a3,sp@- | save a2 and a3 for temporary use
1480 #else
1481 movel a2,sp@-
1482 movel a3,sp@-
1483 movel a4,sp@-
1484 #endif
1485 movel IMM (0),a2 | a2 is a null register
1486 movel d4,a3 | and a3 will preserve the exponent
1488 | First, shift d2-d3 so bit 20 becomes bit 31:
1489 #ifndef __mcoldfire__
1490 rorl IMM (5),d2 | rotate d2 5 places right
1491 swap d2 | and swap it
1492 rorl IMM (5),d3 | do the same thing with d3
1493 swap d3 |
1494 movew d3,d6 | get the rightmost 11 bits of d3
1495 andw IMM (0x07ff),d6 |
1496 orw d6,d2 | and put them into d2
1497 andw IMM (0xf800),d3 | clear those bits in d3
1498 #else
1499 moveq IMM (11),d7 | left shift d2 11 bits
1500 lsll d7,d2
1501 movel d3,d6 | get a copy of d3
1502 lsll d7,d3 | left shift d3 11 bits
1503 andl IMM (0xffe00000),d6 | get the top 11 bits of d3
1504 moveq IMM (21),d7 | right shift them 21 bits
1505 lsrl d7,d6
1506 orl d6,d2 | stick them at the end of d2
1507 #endif
| Multiplier b now lives in d6-d7, multiplicand a in d4-d5, and d0-d3 is
| cleared to accumulate the 128-bit product.
1509 movel d2,d6 | move b into d6-d7
1510 movel d3,d7 | move a into d4-d5
1511 movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
1512 movel d1,d5 |
1513 movel IMM (0),d3 |
1514 movel d3,d2 |
1515 movel d3,d1 |
1516 movel d3,d0 |
1518 | We use a1 as counter:
1519 movel IMM (DBL_MANT_DIG-1),a1
1520 #ifndef __mcoldfire__
1521 exg d7,a1
1522 #else
1523 movel d7,a4
1524 movel a1,d7
1525 movel a4,a1
1526 #endif
| Main shift-add multiply loop. Each pass shifts the 128-bit partial sum
| (d0-d3) and the multiplier (d6-d7) left by one; when a 1 bit falls out of
| the multiplier (carry set), the multiplicand d4-d5 is added into the low
| half with zero (kept in a2) propagating the carry into the high half.
| The "1:" loop-head label below was missing in the damaged copy of this
| file; "dbf d7,1b" / "bpl 1b" at the bottom require it.
1528 1:
1529 #ifndef __mcoldfire__
1530 exg d7,a1 | put counter back in a1
1531 #else
1532 movel d7,a4
1533 movel a1,d7
1534 movel a4,a1
1535 #endif
1536 addl d3,d3 | shift sum once left
1537 addxl d2,d2 |
1538 addxl d1,d1 |
1539 addxl d0,d0 |
1540 addl d7,d7 |
1541 addxl d6,d6 |
1542 bcc 2f | if bit clear skip the following
1543 #ifndef __mcoldfire__
1544 exg d7,a2 |
1545 #else
1546 movel d7,a4
1547 movel a2,d7
1548 movel a4,a2
1549 #endif
1550 addl d5,d3 | else add a to the sum
1551 addxl d4,d2 |
1552 addxl d7,d1 |
1553 addxl d7,d0 |
1554 #ifndef __mcoldfire__
1555 exg d7,a2 |
1556 #else
1557 movel d7,a4
1558 movel a2,d7
1559 movel a4,a2
1560 #endif
| Join point for the "bit clear" path ("bcc 2f" above); this label was also
| lost in the damaged copy.
1561 2:
1562 #ifndef __mcoldfire__
1563 exg d7,a1 | put counter in d7
1564 dbf d7,1b | decrement and branch
1565 #else
1566 movel d7,a4
1567 movel a1,d7
1568 movel a4,a1
1569 subql IMM (1),d7
1570 bpl 1b
1571 #endif
| Product computed: recover the exponent and the saved address registers,
| renormalize the 128-bit product so rounding can use bit DBL_MANT_DIG-32,
| and jump to the shared Lround$exit path. The Lmuldf$* tails below handle
| the NaN/infinity operands detected at entry.
1573 movel a3,d4 | restore exponent
1574 #ifndef __mcoldfire__
1575 moveml sp@+,a2-a3
1576 #else
1577 movel sp@+,a4
1578 movel sp@+,a3
1579 movel sp@+,a2
1580 #endif
1582 | Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
1583 | first thing to do now is to normalize it so bit 8 becomes bit
1584 | DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
1585 swap d0
1586 swap d1
1587 movew d1,d0
1588 swap d2
1589 movew d2,d1
1590 swap d3
1591 movew d3,d2
1592 movew IMM (0),d3
1593 #ifndef __mcoldfire__
1594 lsrl IMM (1),d0
1595 roxrl IMM (1),d1
1596 roxrl IMM (1),d2
1597 roxrl IMM (1),d3
1598 lsrl IMM (1),d0
1599 roxrl IMM (1),d1
1600 roxrl IMM (1),d2
1601 roxrl IMM (1),d3
1602 lsrl IMM (1),d0
1603 roxrl IMM (1),d1
1604 roxrl IMM (1),d2
1605 roxrl IMM (1),d3
1606 #else
| ColdFire has no roxrl: emulate the 3-bit right shift of the 128-bit value
| by shifting each long and or-ing in the bits from its upper neighbour.
1607 moveq IMM (29),d6
1608 lsrl IMM (3),d3
1609 movel d2,d7
1610 lsll d6,d7
1611 orl d7,d3
1612 lsrl IMM (3),d2
1613 movel d1,d7
1614 lsll d6,d7
1615 orl d7,d2
1616 lsrl IMM (3),d1
1617 movel d0,d7
1618 lsll d6,d7
1619 orl d7,d1
1620 lsrl IMM (3),d0
1621 #endif
1623 | Now round, check for over- and underflow, and exit.
1624 movel a0,d7 | get sign bit back into d7
1625 movew IMM (MULTIPLY),d5
1627 btst IMM (DBL_MANT_DIG+1-32),d0
1628 beq Lround$exit
1629 #ifndef __mcoldfire__
1630 lsrl IMM (1),d0
1631 roxrl IMM (1),d1
1632 addw IMM (1),d4
1633 #else
1634 lsrl IMM (1),d1
1635 btst IMM (0),d0
1636 beq 10f
1637 bset IMM (31),d1
1638 10: lsrl IMM (1),d0
1639 addl IMM (1),d4
1640 #endif
1641 bra Lround$exit
1643 Lmuldf$inop:
1644 movew IMM (MULTIPLY),d5
1645 bra Ld$inop
1647 Lmuldf$b$nf:
1648 movew IMM (MULTIPLY),d5
1649 movel a0,d7 | get sign bit back into d7
1650 tstl d3 | we know d2 == 0x7ff00000, so check d3
1651 bne Ld$inop | if d3 <> 0 b is NaN
1652 bra Ld$overflow | else we have overflow (since a is finite)
1654 Lmuldf$a$nf:
1655 movew IMM (MULTIPLY),d5
1656 movel a0,d7 | get sign bit back into d7
1657 tstl d1 | we know d0 == 0x7ff00000, so check d1
1658 bne Ld$inop | if d1 <> 0 a is NaN
1659 bra Ld$overflow | else signal overflow
1661 | If either number is zero return zero, unless the other is +/-INFINITY or
1662 | NaN, in which case we return NaN.
| 0 * Inf and 0 * NaN produce NaN (Ld$inop); otherwise the result is the
| zero already in d0-d1. The final "rts" had been lost from this copy of
| the file (every other exit pairs "unlk a6" with "rts", cf. line 1978);
| without it control fell through into Lmuldf$a$den.
1663 Lmuldf$b$0:
1664 movew IMM (MULTIPLY),d5
1665 #ifndef __mcoldfire__
1666 exg d2,d0 | put b (==0) into d0-d1
1667 exg d3,d1 | and a (with sign bit cleared) into d2-d3
1668 #else
1669 movel d2,d7
1670 movel d0,d2
1671 movel d7,d0
1672 movel d3,d7
1673 movel d1,d3
1674 movel d7,d1
1675 #endif
1676 bra 1f
1677 Lmuldf$a$0:
1678 movel a6@(16),d2 | put b into d2-d3 again
1679 movel a6@(20),d3 |
1680 bclr IMM (31),d2 | clear sign bit
1681 1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
1682 bge Ld$inop | in case NaN or +/-INFINITY return NaN
1683 PICLEA SYM (_fpCCR),a0
1684 movew IMM (0),a0@
1685 #ifndef __mcoldfire__
1686 moveml sp@+,d2-d7
1687 #else
1688 moveml sp@,d2-d7
1689 | XXX if frame pointer is ever removed, stack pointer must
1690 | be adjusted here.
1691 #endif
1692 unlk a6
1693 rts
1695 | If a number is denormalized we put an exponent of 1 but do not put the
1696 | hidden bit back into the fraction; instead we shift left until bit 21
1697 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
1698 | to ensure that the product of the fractions is close to 1.
| NOTE(review): the literal bit 20 tested here matches the
| btst IMM (DBL_MANT_DIG-32-1) used by the __divdf3 denormal handlers —
| presumably DBL_MANT_DIG == 53; confirm against the macro definition.
1699 Lmuldf$a$den:
1700 movel IMM (1),d4
1701 andl d6,d0
1702 1: addl d1,d1 | shift a left until bit 20 is set
1703 addxl d0,d0 |
1704 #ifndef __mcoldfire__
1705 subw IMM (1),d4 | and adjust exponent
1706 #else
1707 subl IMM (1),d4 | and adjust exponent
1708 #endif
1709 btst IMM (20),d0 |
1710 bne Lmuldf$1 |
1711 bra 1b
| Same normalization for operand b; rejoins the main path at Lmuldf$2.
1713 Lmuldf$b$den:
1714 movel IMM (1),d5
1715 andl d6,d2
1716 1: addl d3,d3 | shift b left until bit 20 is set
1717 addxl d2,d2 |
1718 #ifndef __mcoldfire__
1719 subw IMM (1),d5 | and adjust exponent
1720 #else
1721 subql IMM (1),d5 | and adjust exponent
1722 #endif
1723 btst IMM (20),d2 |
1724 bne Lmuldf$2 |
1725 bra 1b
1728 |=============================================================================
1729 | __divdf3
1730 |=============================================================================
1732 | double __divdf3(double, double);
1733 SYM (__divdf3):
| Double-precision divide entry: load a (dividend) into d0-d1 and b
| (divisor) into d2-d3, compute the quotient sign (a0), screen out
| zero/NaN/infinity operands, then isolate exponents and fractions with
| the hidden bit restored so that the fraction ratio lies in (1/2, 2).
1734 #ifndef __mcoldfire__
1735 link a6,IMM (0)
1736 moveml d2-d7,sp@-
1737 #else
1738 link a6,IMM (-24)
1739 moveml d2-d7,sp@
1740 #endif
1741 movel a6@(8),d0 | get a into d0-d1
1742 movel a6@(12),d1 |
1743 movel a6@(16),d2 | and b into d2-d3
1744 movel a6@(20),d3 |
1745 movel d0,d7 | d7 will hold the sign of the result
1746 eorl d2,d7 |
1747 andl IMM (0x80000000),d7
1748 movel d7,a0 | save sign into a0
1749 movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
1750 movel d7,d6 | another (mask for fraction)
1751 notl d6 |
1752 bclr IMM (31),d0 | get rid of a's sign bit '
1753 movel d0,d4 |
1754 orl d1,d4 |
1755 beq Ldivdf$a$0 | branch if a is zero
1756 movel d0,d4 |
1757 bclr IMM (31),d2 | get rid of b's sign bit '
1758 movel d2,d5 |
1759 orl d3,d5 |
1760 beq Ldivdf$b$0 | branch if b is zero
1761 movel d2,d5
1762 cmpl d7,d0 | is a big?
1763 bhi Ldivdf$inop | if a is NaN return NaN
1764 beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
1765 cmpl d7,d2 | now compare b with INFINITY
1766 bhi Ldivdf$inop | if b is NaN return NaN
1767 beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
1768 | Here we have both numbers finite and nonzero (and with no sign bit).
1769 | Now we get the exponents into d4 and d5 and normalize the numbers to
1770 | ensure that the ratio of the fractions is around 1. We do this by
1771 | making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
1772 | set, even if they were denormalized to start with.
1773 | Thus, the result will satisfy: 2 > result > 1/2.
1774 andl d7,d4 | and isolate exponent in d4
1775 beq Ldivdf$a$den | if exponent is zero we have a denormalized
1776 andl d6,d0 | and isolate fraction
1777 orl IMM (0x00100000),d0 | and put hidden bit back
1778 swap d4 | I like exponents in the first byte
1779 #ifndef __mcoldfire__
1780 lsrw IMM (4),d4 |
1781 #else
1782 lsrl IMM (4),d4 |
1783 #endif
1784 Ldivdf$1: |
1785 andl d7,d5 |
1786 beq Ldivdf$b$den |
1787 andl d6,d2 |
1788 orl IMM (0x00100000),d2
1789 swap d5 |
1790 #ifndef __mcoldfire__
1791 lsrw IMM (4),d5 |
1792 #else
1793 lsrl IMM (4),d5 |
1794 #endif
1795 Ldivdf$2: |
1796 #ifndef __mcoldfire__
1797 subw d5,d4 | subtract exponents
1798 addw IMM (D_BIAS),d4 | and add bias
1799 #else
1800 subl d5,d4 | subtract exponents
1801 addl IMM (D_BIAS),d4 | and add bias
1802 #endif
1804 | We are now ready to do the division. We have prepared things in such a way
1805 | that the ratio of the fractions will be less than 2 but greater than 1/2.
1806 | At this point the registers in use are:
1807 | d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
1808 | DBL_MANT_DIG-1-32=1)
1809 | d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
1810 | d4 holds the difference of the exponents, corrected by the bias
1811 | a0 holds the sign of the ratio
1813 | To do the rounding correctly we need to keep information about the
1814 | nonsignificant bits. One way to do this would be to do the division
1815 | using four registers; another is to use two registers (as originally
1816 | I did), but use a sticky bit to preserve information about the
1817 | fractional part. Note that we can keep that info in a1, which is not
1818 | used.
| Long division by repeated subtract-and-shift. Quotient bits accumulate
| in d6 (high long) then d7 (low long); a final pass computes the sticky
| bit. The "5:" and "4:" landing labels restored below had been lost from
| this copy of the file — the "bra 5f"/"bhi 4f" branches require them.
1819 movel IMM (0),d6 | d6-d7 will hold the result
1820 movel d6,d7 |
1821 movel IMM (0),a1 | and a1 will hold the sticky bit
1823 movel IMM (DBL_MANT_DIG-32+1),d5
1825 1: cmpl d0,d2 | is a < b?
1826 bhi 3f | if b > a skip the following
1827 beq 4f | if d0==d2 check d1 and d3
1828 2: subl d3,d1 |
1829 subxl d2,d0 | a <-- a - b
1830 bset d5,d6 | set the corresponding bit in d6
1831 3: addl d1,d1 | shift a by 1
1832 addxl d0,d0 |
1833 #ifndef __mcoldfire__
1834 dbra d5,1b | and branch back
1835 #else
1836 subql IMM (1), d5
1837 bpl 1b
1838 #endif
1839 bra 5f
1840 4: cmpl d1,d3 | here d0==d2, so check d1 and d3
1841 bhi 3b | if d1 > d2 skip the subtraction
1842 bra 2b | else go do it
1843 5:
1844 | Here we have to start setting the bits in the second long.
1845 movel IMM (31),d5 | again d5 is counter
1847 1: cmpl d0,d2 | is a < b?
1848 bhi 3f | if b > a skip the following
1849 beq 4f | if d0==d2 check d1 and d3
1850 2: subl d3,d1 |
1851 subxl d2,d0 | a <-- a - b
1852 bset d5,d7 | set the corresponding bit in d7
1853 3: addl d1,d1 | shift a by 1
1854 addxl d0,d0 |
1855 #ifndef __mcoldfire__
1856 dbra d5,1b | and branch back
1857 #else
1858 subql IMM (1), d5
1859 bpl 1b
1860 #endif
1861 bra 5f
1862 4: cmpl d1,d3 | here d0==d2, so check d1 and d3
1863 bhi 3b | if d1 > d2 skip the subtraction
1864 bra 2b | else go do it
1865 5:
1866 | Now go ahead checking until we hit a one, which we store in d2.
1867 movel IMM (DBL_MANT_DIG),d5
1868 1: cmpl d2,d0 | is a < b?
1869 bhi 4f | if b < a, exit
1870 beq 3f | if d0==d2 check d1 and d3
1871 2: addl d1,d1 | shift a by 1
1872 addxl d0,d0 |
1873 #ifndef __mcoldfire__
1874 dbra d5,1b | and branch back
1875 #else
1876 subql IMM (1), d5
1877 bpl 1b
1878 #endif
1879 movel IMM (0),d2 | here no sticky bit was found
1880 movel d2,d3
1881 bra 5f
1882 3: cmpl d1,d3 | here d0==d2, so check d1 and d3
1883 bhi 2b | if d1 > d2 go back
1884 4:
1885 | Here put the sticky bit in d2-d3 (in the position which actually corresponds
1886 | to it; if you don't do this the algorithm loses in some cases). '
1887 movel IMM (0),d2
1888 movel d2,d3
1889 #ifndef __mcoldfire__
1890 subw IMM (DBL_MANT_DIG),d5
1891 addw IMM (63),d5
1892 cmpw IMM (31),d5
1893 #else
1894 subl IMM (DBL_MANT_DIG),d5
1895 addl IMM (63),d5
1896 cmpl IMM (31),d5
1897 #endif
1898 bhi 2f
1899 1: bset d5,d3
1900 bra 5f
| NOTE(review): the subtract-32 block below is unreachable (it sits between
| an unconditional "bra 5f" and the "2:" target). Behavior is still correct
| because bset with a data-register bit number operates modulo 32, so
| "2: bset d5,d2" sets bit d5-32 when 31 < d5 < 64. Confirm upstream intent.
1901 #ifndef __mcoldfire__
1902 subw IMM (32),d5
1903 #else
1904 subl IMM (32),d5
1905 #endif
1906 2: bset d5,d2
1907 5:
1908 | Finally we are finished! Move the longs in the address registers to
1909 | their final destination:
1910 movel d6,d0
1911 movel d7,d1
1912 movel IMM (0),d3
1914 | Here we have finished the division, with the result in d0-d1-d2-d3, with
1915 | 2^21 <= d6 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
1916 | If it is not, then definitely bit 21 is set. Normalize so bit 22 is
1917 | not set:
1918 btst IMM (DBL_MANT_DIG-32+1),d0
1919 beq 1f
1920 #ifndef __mcoldfire__
1921 lsrl IMM (1),d0
1922 roxrl IMM (1),d1
1923 roxrl IMM (1),d2
1924 roxrl IMM (1),d3
1925 addw IMM (1),d4
1926 #else
1927 lsrl IMM (1),d3
1928 btst IMM (0),d2
1929 beq 10f
1930 bset IMM (31),d3
1931 10: lsrl IMM (1),d2
1932 btst IMM (0),d1
1933 beq 11f
1934 bset IMM (31),d2
1935 11: lsrl IMM (1),d1
1936 btst IMM (0),d0
1937 beq 12f
1938 bset IMM (31),d1
1939 12: lsrl IMM (1),d0
1940 addl IMM (1),d4
1941 #endif
| Join point for "beq 1f" above (no normalization needed); this label had
| been lost from this copy of the file — without it the branch would bind
| to an unrelated numeric label further down.
1942 1:
1943 | Now round, check for over- and underflow, and exit.
1944 movel a0,d7 | restore sign bit to d7
1945 movew IMM (DIVIDE),d5
1946 bra Lround$exit
1948 Ldivdf$inop:
1949 movew IMM (DIVIDE),d5
1950 bra Ld$inop
1952 Ldivdf$a$0:
1953 | If a is zero check to see whether b is zero also. In that case return
1954 | NaN; then check if b is NaN, and return NaN also in that case. Else
1955 | return zero.
| Covers 0/0 -> NaN, 0/NaN -> NaN, 0/finite -> zero. Note that the zero
| returned is always +0: the sign saved in a0 is not merged back here.
1956 movew IMM (DIVIDE),d5
1957 bclr IMM (31),d2 |
1958 movel d2,d4 |
1959 orl d3,d4 |
1960 beq Ld$inop | if b is also zero return NaN
1961 cmpl IMM (0x7ff00000),d2 | check for NaN
1962 bhi Ld$inop |
1963 blt 1f |
1964 tstl d3 | d2 == 0x7ff00000: NaN only if the low fraction is nonzero
1965 bne Ld$inop |
1966 1: movel IMM (0),d0 | else return zero
1967 movel d0,d1 |
1968 PICLEA SYM (_fpCCR),a0 | clear exception flags
1969 movew IMM (0),a0@ |
1970 #ifndef __mcoldfire__
1971 moveml sp@+,d2-d7 |
1972 #else
1973 moveml sp@,d2-d7 |
1974 | XXX if frame pointer is ever removed, stack pointer must
1975 | be adjusted here.
1976 #endif
1977 unlk a6 |
1978 rts |
| Special-operand tails for __divdf3: division by zero, infinite divisor,
| and infinite/NaN dividend. d7 still holds +INFINITY (0x7ff00000) on the
| Ldivdf$a$nf path.
1980 Ldivdf$b$0:
1981 movew IMM (DIVIDE),d5
1982 | If we got here a is not zero. Check if a is NaN; in that case return NaN,
1983 | else return +/-INFINITY. Remember that a is in d0 with the sign bit
1984 | cleared already.
1985 movel a0,d7 | put a's sign bit back in d7 '
1986 cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
1987 bhi Ld$inop | if larger it is NaN
1988 tstl d1 |
1989 bne Ld$inop |
1990 bra Ld$div$0 | else signal DIVIDE_BY_ZERO
1992 Ldivdf$b$nf:
1993 movew IMM (DIVIDE),d5
1994 | If d2 == 0x7ff00000 we have to check d3.
1995 tstl d3 |
1996 bne Ld$inop | if d3 <> 0, b is NaN
1997 bra Ld$underflow | else b is +/-INFINITY, so signal underflow
1999 Ldivdf$a$nf:
2000 movew IMM (DIVIDE),d5
2001 | If d0 == 0x7ff00000 we have to check d1.
2002 tstl d1 |
2003 bne Ld$inop | if d1 <> 0, a is NaN
2004 | If a is INFINITY we have to check b
2005 cmpl d7,d2 | compare b with INFINITY
2006 bge Ld$inop | if b is NaN or INFINITY return NaN
2007 tstl d3 |
2008 bne Ld$inop |
2009 bra Ld$overflow | else return overflow
2011 | If a number is denormalized we put an exponent of 1 but do not put the
2012 | bit back into the fraction.
| Shift the denormalized operand left (decrementing its exponent, which
| started at 1) until the hidden-bit position is set, then rejoin the main
| path at Ldivdf$1 / Ldivdf$2.
2013 Ldivdf$a$den:
2014 movel IMM (1),d4
2015 andl d6,d0
2016 1: addl d1,d1 | shift a left until bit 20 is set
2017 addxl d0,d0
2018 #ifndef __mcoldfire__
2019 subw IMM (1),d4 | and adjust exponent
2020 #else
2021 subl IMM (1),d4 | and adjust exponent
2022 #endif
2023 btst IMM (DBL_MANT_DIG-32-1),d0
2024 bne Ldivdf$1
2025 bra 1b
2027 Ldivdf$b$den:
2028 movel IMM (1),d5
2029 andl d6,d2
2030 1: addl d3,d3 | shift b left until bit 20 is set
2031 addxl d2,d2
2032 #ifndef __mcoldfire__
2033 subw IMM (1),d5 | and adjust exponent
2034 #else
2035 subql IMM (1),d5 | and adjust exponent
2036 #endif
2037 btst IMM (DBL_MANT_DIG-32-1),d2
2038 bne Ldivdf$2
2039 bra 1b
2041 Lround$exit:
2042 | This is a common exit point for __muldf3 and __divdf3. When they enter
2043 | this point the sign of the result is in d7, the result in d0-d1, normalized
2044 | so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
2046 | First check for underflow in the exponent:
2047 #ifndef __mcoldfire__
2048 cmpw IMM (-DBL_MANT_DIG-1),d4
2049 #else
2050 cmpl IMM (-DBL_MANT_DIG-1),d4
2051 #endif
2052 blt Ld$underflow
2053 | It could happen that the exponent is less than 1, in which case the
2054 | number is denormalized. In this case we shift right and adjust the
2055 | exponent until it becomes 1 or the fraction is zero (in the latter case
2056 | we signal underflow and return zero).
2057 movel d7,a0 |
2058 movel IMM (0),d6 | use d6-d7 to collect bits flushed right
2059 movel d6,d7 | use d6-d7 to collect bits flushed right
2060 #ifndef __mcoldfire__
2061 cmpw IMM (1),d4 | if the exponent is less than 1 we
2062 #else
2063 cmpl IMM (1),d4 | if the exponent is less than 1 we
2064 #endif
2065 bge 2f | have to shift right (denormalize)
| Denormalization loop head: shift d0-d3 right one bit per pass, catching
| the flushed bits in d6-d7, until the exponent reaches 1. This "1:" label
| (target of "bra 1b" below) had been lost from this copy of the file.
2066 1:
2067 #ifndef __mcoldfire__
2068 addw IMM (1),d4 | adjust the exponent
2069 lsrl IMM (1),d0 | shift right once
2070 roxrl IMM (1),d1 |
2071 roxrl IMM (1),d2 |
2072 roxrl IMM (1),d3 |
2073 roxrl IMM (1),d6 |
2074 roxrl IMM (1),d7 |
2075 cmpw IMM (1),d4 | is the exponent 1 already?
2076 #else
2077 addl IMM (1),d4 | adjust the exponent
2078 lsrl IMM (1),d7
2079 btst IMM (0),d6
2080 beq 13f
2081 bset IMM (31),d7
2082 13: lsrl IMM (1),d6
2083 btst IMM (0),d3
2084 beq 14f
2085 bset IMM (31),d6
2086 14: lsrl IMM (1),d3
2087 btst IMM (0),d2
2088 beq 10f
2089 bset IMM (31),d3
2090 10: lsrl IMM (1),d2
2091 btst IMM (0),d1
2092 beq 11f
2093 bset IMM (31),d2
2094 11: lsrl IMM (1),d1
2095 btst IMM (0),d0
2096 beq 12f
2097 bset IMM (31),d1
2098 12: lsrl IMM (1),d0
2099 cmpl IMM (1),d4 | is the exponent 1 already?
2100 #endif
2101 beq 2f | done once the exponent reaches 1
2102 bra 1b | else loop back
2103 bra Ld$underflow | safety check, shouldn't execute '
2104 2: orl d6,d2 | this is a trick so we don't lose '
2105 orl d7,d3 | the bits which were flushed right
2106 movel a0,d7 | get back sign bit into d7
2107 | Now call the rounding routine (which takes care of denormalized numbers):
| The rounding mode is read from the second word of _fpCCR; the chosen
| Lround$to$* routine returns to Lround$0 via the address placed in a0.
2108 lea pc@(Lround$0),a0 | to return from rounding routine
2109 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2110 #ifdef __mcoldfire__
2111 clrl d6
2112 #endif
2113 movew a1@(6),d6 | rounding mode in d6
2114 beq Lround$to$nearest
2115 #ifndef __mcoldfire__
2116 cmpw IMM (ROUND_TO_PLUS),d6
2117 #else
2118 cmpl IMM (ROUND_TO_PLUS),d6
2119 #endif
2120 bhi Lround$to$minus
2121 blt Lround$to$zero
2122 bra Lround$to$plus
2123 Lround$0:
2124 | Here we have a correctly rounded result (either normalized or denormalized).
2126 | Here we should have either a normalized number or a denormalized one, and
2127 | the exponent is necessarily larger or equal to 1 (so we don't have to '
2128 | check again for underflow!). We have to check for overflow or for a
2129 | denormalized number (which also signals underflow).
2130 | Check for overflow (i.e., exponent >= 0x7ff).
2131 #ifndef __mcoldfire__
2132 cmpw IMM (0x07ff),d4
2133 #else
2134 cmpl IMM (0x07ff),d4
2135 #endif
2136 bge Ld$overflow
2137 | Now check for a denormalized number (exponent==0):
2138 movew d4,d4
2139 beq Ld$den
2141 | Put back the exponents and sign and return.
2142 #ifndef __mcoldfire__
2143 lslw IMM (4),d4 | exponent back to fourth byte
2144 #else
2145 lsll IMM (4),d4 | exponent back to fourth byte
2146 #endif
2147 bclr IMM (DBL_MANT_DIG-32-1),d0
2148 swap d0 | and put back exponent
2149 #ifndef __mcoldfire__
2150 orw d4,d0 |
2151 #else
2152 orl d4,d0 |
2153 #endif
2154 swap d0 |
2155 orl d7,d0 | and sign also
2157 PICLEA SYM (_fpCCR),a0
2158 movew IMM (0),a0@
2159 #ifndef __mcoldfire__
2160 moveml sp@+,d2-d7
2161 #else
2162 moveml sp@,d2-d7
2163 | XXX if frame pointer is ever removed, stack pointer must
2164 | be adjusted here.
2165 #endif
2166 unlk a6
| The "rts" had been lost from this copy of the file; without it control
| falls through into __negdf2.
2167 rts
2169 |=============================================================================
2170 | __negdf2
2171 |=============================================================================
2173 | double __negdf2(double, double);
2175 SYM (__negdf2):
| Double-precision negate: flip the sign bit, then classify the result —
| zero (either sign) returns +0, NaN goes to Ld$inop, infinity to Ld$infty
| with its (new) sign, and any other finite value is returned directly.
2176 #ifndef __mcoldfire__
2177 link a6,IMM (0)
2178 moveml d2-d7,sp@-
2179 #else
2180 link a6,IMM (-24)
2181 moveml d2-d7,sp@
2182 #endif
2182 movew IMM (NEGATE),d5
2183 movel a6@(8),d0 | get number to negate in d0-d1
2184 movel a6@(12),d1 |
2185 bchg IMM (31),d0 | negate
2186 movel d0,d2 | make a positive copy (for the tests)
2187 bclr IMM (31),d2 |
2188 movel d2,d4 | check for zero
2189 orl d1,d4 |
2190 beq 2f | if zero (either sign) return +zero
2191 cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
2192 blt 1f | if finite, return
2193 bhi Ld$inop | if larger (fraction not zero) is NaN
2194 tstl d1 | if d2 == 0x7ff00000 check d1
2195 bne Ld$inop |
2196 movel d0,d7 | else get sign and return INFINITY
2197 andl IMM (0x80000000),d7
2198 bra Ld$infty
2199 1: PICLEA SYM (_fpCCR),a0
2200 movew IMM (0),a0@
2201 #ifndef __mcoldfire__
2202 moveml sp@+,d2-d7
2203 #else
2204 moveml sp@,d2-d7
2205 | XXX if frame pointer is ever removed, stack pointer must
2206 | be adjusted here.
2207 #endif
2208 unlk a6
| The "rts" had been lost from this copy of the file; without it control
| falls through into the "2:" zero-return path below.
2209 rts
2210 2: bclr IMM (31),d0
2211 bra 1b
2213 |=============================================================================
2214 | __cmpdf2
2215 |=============================================================================
2217 GREATER = 1
2218 LESS = -1
2219 EQUAL = 0
2221 | int __cmpdf2(double, double);
| Three-way compare: returns GREATER/LESS/EQUAL in d0. Signed zeros are
| normalized to +0 first; NaN operands branch to Ld$inop.
| NOTE(review): routing NaNs to Ld$inop returns a NaN double rather than a
| comparison integer; later libgcc versions return a distinct value here.
2222 SYM (__cmpdf2):
2223 #ifndef __mcoldfire__
2224 link a6,IMM (0)
2225 moveml d2-d7,sp@- | save registers
2226 #else
2227 link a6,IMM (-24)
2228 moveml d2-d7,sp@
2229 #endif
2230 movew IMM (COMPARE),d5
2231 movel a6@(8),d0 | get first operand
2232 movel a6@(12),d1 |
2233 movel a6@(16),d2 | get second operand
2234 movel a6@(20),d3 |
2235 | First check if a and/or b are (+/-) zero and in that case clear
2236 | the sign bit.
2237 movel d0,d6 | copy signs into d6 (a) and d7(b)
2238 bclr IMM (31),d0 | and clear signs in d0 and d2
2239 movel d2,d7 |
2240 bclr IMM (31),d2 |
| The NaN threshold below was 0x7fff0000, contradicting its own comment and
| the 0x7ff00000 used by every other NaN check in this file; NaNs with a
| high word in 0x7ff00001..0x7ffeffff were treated as ordinary numbers.
2241 cmpl IMM (0x7ff00000),d0 | check for a == NaN
2242 bhi Ld$inop | if d0 > 0x7ff00000, a is NaN
2243 beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
2244 movel d0,d4 | copy into d4 to test for zero
2245 orl d1,d4 |
2246 beq Lcmpdf$a$0 |
2247 Lcmpdf$0:
2248 cmpl IMM (0x7ff00000),d2 | check for b == NaN
2249 bhi Ld$inop | if d2 > 0x7ff00000, b is NaN
2250 beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
2251 movel d2,d4 |
2252 orl d3,d4 |
2253 beq Lcmpdf$b$0 |
2254 Lcmpdf$1:
2255 | Check the signs
2256 eorl d6,d7
2257 bpl 1f
2258 | If the signs are not equal check if a >= 0
2259 tstl d6
2260 bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
2261 bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
2262 1:
2263 | If the signs are equal check for < 0
2264 tstl d6
2265 bpl 1f
2266 | If both are negative exchange them
2267 #ifndef __mcoldfire__
2268 exg d0,d2
2269 exg d1,d3
2270 #else
2271 movel d0,d7
2272 movel d2,d0
2273 movel d7,d2
2274 movel d1,d7
2275 movel d3,d1
2276 movel d7,d3
2277 #endif
2278 1:
2279 | Now that they are positive we just compare them as longs (does this also
2280 | work for denormalized numbers?).
2281 cmpl d0,d2
2282 bhi Lcmpdf$b$gt$a | |b| > |a|
2283 bne Lcmpdf$a$gt$b | |b| < |a|
2284 | If we got here d0 == d2, so we compare d1 and d3.
2285 cmpl d1,d3
2286 bhi Lcmpdf$b$gt$a | |b| > |a|
2287 bne Lcmpdf$a$gt$b | |b| < |a|
2288 | If we got here a == b.
2289 movel IMM (EQUAL),d0
2290 #ifndef __mcoldfire__
2291 moveml sp@+,d2-d7 | put back the registers
2292 #else
2293 moveml sp@,d2-d7
2294 | XXX if frame pointer is ever removed, stack pointer must
2295 | be adjusted here.
2296 #endif
2297 unlk a6
2298 rts
2299 Lcmpdf$a$gt$b:
2300 movel IMM (GREATER),d0
2301 #ifndef __mcoldfire__
2302 moveml sp@+,d2-d7 | put back the registers
2303 #else
2304 moveml sp@,d2-d7
2305 | XXX if frame pointer is ever removed, stack pointer must
2306 | be adjusted here.
2307 #endif
2308 unlk a6
2309 rts
2310 Lcmpdf$b$gt$a:
2311 movel IMM (LESS),d0
2312 #ifndef __mcoldfire__
2313 moveml sp@+,d2-d7 | put back the registers
2314 #else
2315 moveml sp@,d2-d7
2316 | XXX if frame pointer is ever removed, stack pointer must
2317 | be adjusted here.
2318 #endif
2319 unlk a6
2320 rts
| Zero operands: clear the stored sign copy so -0 compares equal to +0.
2322 Lcmpdf$a$0:
2323 bclr IMM (31),d6
2324 bra Lcmpdf$0
2325 Lcmpdf$b$0:
2326 bclr IMM (31),d7
2327 bra Lcmpdf$1
| High word equals 0x7ff00000: NaN only if the low fraction word is nonzero.
2329 Lcmpdf$a$nf:
2330 tstl d1
2331 bne Ld$inop
2332 bra Lcmpdf$0
2334 Lcmpdf$b$nf:
2335 tstl d3
2336 bne Ld$inop
2337 bra Lcmpdf$1
2339 |=============================================================================
2340 | rounding routines
2341 |=============================================================================
2343 | The rounding routines expect the number to be normalized in registers
2344 | d0-d1-d2-d3, with the exponent in register d4. They assume that the
2345 | exponent is larger or equal to 1. They return a properly normalized number
2346 | if possible, and a denormalized number otherwise. The exponent is returned
2347 | in d4.
| ---------------------------------------------------------------------------
| Round-to-nearest(-even) for the double-precision routines.
| In:  d0-d1-d2-d3 = fraction (d0 highest), d4 = exponent (assumed >= 1),
|      a0 = address in the caller to jump back to.
| Out: d0-d1 = rounded fraction, d4 = exponent (zeroed when the result
|      stays denormalized).
| The Lround$to$zero/plus/minus entries at the bottom simply truncate.
| NOTE(review): three numeric local labels (two "2:" and one "1:") were
| missing from this copy of the file; they are restored below at the
| positions the forward branches require -- confirm against the original.
| ---------------------------------------------------------------------------
2349 Lround$to$nearest:
2350 | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
2351 | Here we assume that the exponent is not too small (this should be checked
2352 | before entering the rounding routine), but the number could be denormalized.
2354 | Check for denormalized numbers:
2355 1: btst IMM (DBL_MANT_DIG-32),d0
2356 bne 2f | if set the number is normalized
2357 | Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
2358 | is one (remember that a denormalized number corresponds to an
2359 | exponent of -D_BIAS+1).
2360 #ifndef __mcoldfire__
2361 cmpw IMM (1),d4 | remember that the exponent is at least one
2362 #else
2363 cmpl IMM (1),d4 | remember that the exponent is at least one
2364 #endif
2365 beq 2f | an exponent of one means denormalized
2366 addl d3,d3 | else shift and adjust the exponent
2367 addxl d2,d2 |
2368 addxl d1,d1 |
2369 addxl d0,d0 |
2370 #ifndef __mcoldfire__
2371 dbra d4,1b |
2372 #else
2373 subql IMM (1), d4
2374 bpl 1b
2375 #endif
2: | (restored label: target of the two "beq 2f" branches above)
2377 | Now round: we do it as follows: after the shifting we can write the
2378 | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
2379 | If delta < 1, do nothing. If delta > 1, add 1 to f.
2380 | If delta == 1, we make sure the rounded number will be even
2381 | (after shifting).
2382 btst IMM (0),d1 | is delta < 1?
2383 beq 2f | if so, do not do anything
2384 orl d2,d3 | is delta == 1?
2385 bne 1f | delta > 1: just add one
2386 movel d1,d3 |
2387 andl IMM (2),d3 | bit 1 is the last significant bit
2388 movel IMM (0),d2 |
2389 addl d3,d1 |
2390 addxl d2,d0 |
2391 bra 2f |
2392 1: movel IMM (1),d3 | else add 1
2393 movel IMM (0),d2 |
2394 addl d3,d1 |
2395 addxl d2,d0
2396 | Shift right once (because we used bit #DBL_MANT_DIG-32!).
2: | (restored label: rounding step done)
2398 #ifndef __mcoldfire__
2399 lsrl IMM (1),d0
2400 roxrl IMM (1),d1
2401 #else
| ColdFire has no roxrl; emulate the two-register right shift by hand:
2402 lsrl IMM (1),d1
2403 btst IMM (0),d0
2404 beq 10f
2405 bset IMM (31),d1
2406 10: lsrl IMM (1),d0
2407 #endif
2409 | Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
2410 | 'fraction overflow' ...).
2411 btst IMM (DBL_MANT_DIG-32),d0
2412 beq 1f
2413 #ifndef __mcoldfire__
2414 lsrl IMM (1),d0
2415 roxrl IMM (1),d1
2416 addw IMM (1),d4
2417 #else
2418 lsrl IMM (1),d1
2419 btst IMM (0),d0
2420 beq 10f
2421 bset IMM (31),d1
2422 10: lsrl IMM (1),d0
2423 addl IMM (1),d4
2424 #endif
1: | (restored label: no fraction overflow)
2426 | If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
2427 | have to put the exponent to zero and return a denormalized number.
2428 btst IMM (DBL_MANT_DIG-32-1),d0
2429 beq 1f
2430 jmp a0@
2431 1: movel IMM (0),d4
2432 jmp a0@
2434 Lround$to$zero:
2435 Lround$to$plus:
2436 Lround$to$minus:
2437 jmp a0@
2438 #endif /* L_double */
2440 #ifdef L_float
| Single-precision support section.  The symbols below give the IEEE-754
| single-format bit patterns plus the exception-reporting codes shared
| with $_exception_handler (defined elsewhere in this file).
2442 .globl SYM (_fpCCR)
2443 .globl $_exception_handler
| Bit patterns of the special single-precision values.
2445 QUIET_NaN = 0xffffffff
2446 SIGNL_NaN = 0x7f800001
2447 INFINITY = 0x7f800000
| Exponent parameters (this file biases by 126 -- see the format comment
| further down: "exponent, shifted by 126").
2449 F_MAX_EXP = 0xff
2450 F_BIAS = 126
2451 FLT_MAX_EXP = F_MAX_EXP - F_BIAS
2452 FLT_MIN_EXP = 1 - F_BIAS
2453 FLT_MANT_DIG = 24
| Exception flags; the routines below place them in d7 before jumping to
| $_exception_handler.
2455 INEXACT_RESULT = 0x0001
2456 UNDERFLOW = 0x0002
2457 OVERFLOW = 0x0004
2458 DIVIDE_BY_ZERO = 0x0008
2459 INVALID_OPERATION = 0x0010
| Operand-format code, passed to $_exception_handler in d6.
2461 SINGLE_FLOAT = 1
| Operation codes, passed to $_exception_handler in d5.
2463 NOOP = 0
2464 ADD = 1
2465 MULTIPLY = 2
2466 DIVIDE = 3
2467 NEGATE = 4
2468 COMPARE = 5
2469 EXTENDSFDF = 6
2470 TRUNCDFSF = 7
2472 UNKNOWN = -1
| Rounding modes, read from the word at offset 6 of SYM (_fpCCR).
2473 ROUND_TO_NEAREST = 0 | round result to nearest representable value
2474 ROUND_TO_ZERO = 1 | round result towards zero
2475 ROUND_TO_PLUS = 2 | round result towards plus infinity
2476 ROUND_TO_MINUS = 3 | round result towards minus infinity
2478 | Entry points:
2480 .globl SYM (__addsf3)
2481 .globl SYM (__subsf3)
2482 .globl SYM (__mulsf3)
2483 .globl SYM (__divsf3)
2484 .globl SYM (__negsf2)
2485 .globl SYM (__cmpsf2)
2487 | These are common routines to return and signal exceptions.
2489 .text
2490 .even
| ---------------------------------------------------------------------------
| Common exception exits for the single-precision routines.  Each loads the
| value to return into d0, the exception flags into d7 and the format code
| SINGLE_FLOAT into d6, then jumps to $_exception_handler (defined
| elsewhere in this file).  d5 is expected to already hold the operation
| code (ADD, MULTIPLY, ...), set by the caller before branching here.
| Lf$den, Lf$infty and Lf$div$0 first OR d7 into the result: callers put
| the sign bit there (see e.g. Laddsf$nf below).
| ---------------------------------------------------------------------------
2492 Lf$den:
2493 | Return and signal a denormalized number
2494 orl d7,d0
2495 movew IMM (INEXACT_RESULT+UNDERFLOW),d7
2496 moveq IMM (SINGLE_FLOAT),d6
2497 PICJUMP $_exception_handler
2499 Lf$infty:
2500 Lf$overflow:
2501 | Return a properly signed INFINITY and set the exception flags
2502 movel IMM (INFINITY),d0
2503 orl d7,d0
2504 movew IMM (INEXACT_RESULT+OVERFLOW),d7
2505 moveq IMM (SINGLE_FLOAT),d6
2506 PICJUMP $_exception_handler
2508 Lf$underflow:
2509 | Return 0 and set the exception flags
2510 movel IMM (0),d0
2511 movew IMM (INEXACT_RESULT+UNDERFLOW),d7
2512 moveq IMM (SINGLE_FLOAT),d6
2513 PICJUMP $_exception_handler
2515 Lf$inop:
2516 | Return a quiet NaN and set the exception flags
2517 movel IMM (QUIET_NaN),d0
2518 movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
2519 moveq IMM (SINGLE_FLOAT),d6
2520 PICJUMP $_exception_handler
2522 Lf$div$0:
2523 | Return a properly signed INFINITY and set the exception flags
2524 movel IMM (INFINITY),d0
2525 orl d7,d0
2526 movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
2527 moveq IMM (SINGLE_FLOAT),d6
2528 PICJUMP $_exception_handler
2530 |=============================================================================
2531 |=============================================================================
2532 | single precision routines
2533 |=============================================================================
2534 |=============================================================================
2536 | A single precision floating point number (float) has the format:
2538 | struct _float {
2539 | unsigned int sign : 1; /* sign bit */
2540 | unsigned int exponent : 8; /* exponent, shifted by 126 */
2541 | unsigned int fraction : 23; /* fraction */
2542 | } float;
2544 | Thus sizeof(float) = 4 (32 bits).
2546 | All the routines are callable from C programs, and return the result
2547 | in the single register d0. They also preserve all registers except
2548 | d0-d1 and a0-a1.
2550 |=============================================================================
2551 | __subsf3
2552 |=============================================================================
2554 | float __subsf3(float, float);
| a - b == a + (-b): flip the sign bit of the second operand in its stack
| argument slot and fall straight into __addsf3.  This label must
| therefore stay immediately before SYM (__addsf3).
2555 SYM (__subsf3):
2556 bchg IMM (31),sp@(8) | change sign of second operand
2557 | and fall through
| ---------------------------------------------------------------------------
| float __addsf3(float a, float b);
| Register use in this section: d0/d1 = two-register working copy of a's
| fraction, d2/d3 = b's, d4 = mask (fraction, then exponent), d5 = hidden
| bit (0x01000000), d6/d7 = exponents while the sign bits are parked in
| a0/a1.
| NOTE(review): the numeric local labels 2: 3: 4: 5: 6: 7: 8: were missing
| from this copy of the file; they are restored below at the positions the
| visible branches (bge 4f, dbra d7,3b, ...) require -- confirm against
| the original source.
| ---------------------------------------------------------------------------
2558 |=============================================================================
2559 | __addsf3
2560 |=============================================================================
2562 | float __addsf3(float, float);
2563 SYM (__addsf3):
2564 #ifndef __mcoldfire__
2565 link a6,IMM (0) | everything will be done in registers
2566 moveml d2-d7,sp@- | save all data registers but d0-d1
2567 #else
2568 link a6,IMM (-24)
2569 moveml d2-d7,sp@
2570 #endif
2571 movel a6@(8),d0 | get first operand
2572 movel a6@(12),d1 | get second operand
2573 movel d0,d6 | get d0's sign bit '
2574 addl d0,d0 | check and clear sign bit of a
2575 beq Laddsf$b | if zero return second operand
2576 movel d1,d7 | save b's sign bit '
2577 addl d1,d1 | get rid of sign bit
2578 beq Laddsf$a | if zero return first operand
2580 movel d6,a0 | save signs in address registers
2581 movel d7,a1 | so we can use d6 and d7
2583 | Get the exponents and check for denormalized and/or infinity.
2585 movel IMM (0x00ffffff),d4 | mask to get fraction
2586 movel IMM (0x01000000),d5 | mask to put hidden bit back
2588 movel d0,d6 | save a to get exponent
2589 andl d4,d0 | get fraction in d0
2590 notl d4 | make d4 into a mask for the exponent
2591 andl d4,d6 | get exponent in d6
2592 beq Laddsf$a$den | branch if a is denormalized
2593 cmpl d4,d6 | check for INFINITY or NaN
2594 beq Laddsf$nf
2595 swap d6 | put exponent into first word
2596 orl d5,d0 | and put hidden bit back
2597 Laddsf$1:
2598 | Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
2599 movel d1,d7 | get exponent in d7
2600 andl d4,d7 |
2601 beq Laddsf$b$den | branch if b is denormalized
2602 cmpl d4,d7 | check for INFINITY or NaN
2603 beq Laddsf$nf
2604 swap d7 | put exponent into first word
2605 notl d4 | make d4 into a mask for the fraction
2606 andl d4,d1 | get fraction in d1
2607 orl d5,d1 | and put hidden bit back
2608 Laddsf$2:
2609 | Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
2611 | Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
2612 | shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
2613 | bit).
2615 movel d1,d2 | move b to d2, since we want to use
2616 | two registers to do the sum
2617 movel IMM (0),d1 | and clear the new ones
2618 movel d1,d3 |
2620 | Here we shift the numbers in registers d0 and d1 so the exponents are the
2621 | same, and put the largest exponent in d6. Note that we are using two
2622 | registers for each number (see the discussion by D. Knuth in "Seminumerical
2623 | Algorithms").
2624 #ifndef __mcoldfire__
2625 cmpw d6,d7 | compare exponents
2626 #else
2627 cmpl d6,d7 | compare exponents
2628 #endif
2629 beq Laddsf$3 | if equal don't shift '
2630 bhi 5f | branch if second exponent largest
2632 subl d6,d7 | keep the largest exponent
2633 negl d7
2634 #ifndef __mcoldfire__
2635 lsrw IMM (8),d7 | put difference in lower byte
2636 #else
2637 lsrl IMM (8),d7 | put difference in lower byte
2638 #endif
2639 | if difference is too large we don't shift (actually, we can just exit) '
2640 #ifndef __mcoldfire__
2641 cmpw IMM (FLT_MANT_DIG+2),d7
2642 #else
2643 cmpl IMM (FLT_MANT_DIG+2),d7
2644 #endif
2645 bge Laddsf$b$small
2646 #ifndef __mcoldfire__
2647 cmpw IMM (16),d7 | if difference >= 16 swap
2648 #else
2649 cmpl IMM (16),d7 | if difference >= 16 swap
2650 #endif
2651 bge 4f
2: | (restored) shift b right one bit at a time
2653 #ifndef __mcoldfire__
2654 subw IMM (1),d7
2655 #else
2656 subql IMM (1), d7
2657 #endif
3: | (restored) top of the per-bit shift loop
2659 #ifndef __mcoldfire__
2660 lsrl IMM (1),d2 | shift right second operand
2661 roxrl IMM (1),d3
2662 dbra d7,3b
2663 #else
2664 lsrl IMM (1),d3
2665 btst IMM (0),d2
2666 beq 10f
2667 bset IMM (31),d3
2668 10: lsrl IMM (1),d2
2669 subql IMM (1), d7
2670 bpl 3b
2671 #endif
2672 bra Laddsf$3
4: | (restored) difference >= 16: move b down a whole word at a time
2674 movew d2,d3
2675 swap d3
2676 movew d3,d2
2677 swap d2
2678 #ifndef __mcoldfire__
2679 subw IMM (16),d7
2680 #else
2681 subl IMM (16),d7
2682 #endif
2683 bne 2b | if still more bits, go back to normal case
2684 bra Laddsf$3
5: | (restored) b has the larger exponent: same dance, shifting a (d0-d1)
2686 #ifndef __mcoldfire__
2687 exg d6,d7 | exchange the exponents
2688 #else
2689 eorl d6,d7
2690 eorl d7,d6
2691 eorl d6,d7
2692 #endif
2693 subl d6,d7 | keep the largest exponent
2694 negl d7 |
2695 #ifndef __mcoldfire__
2696 lsrw IMM (8),d7 | put difference in lower byte
2697 #else
2698 lsrl IMM (8),d7 | put difference in lower byte
2699 #endif
2700 | if difference is too large we don't shift (and exit!) '
2701 #ifndef __mcoldfire__
2702 cmpw IMM (FLT_MANT_DIG+2),d7
2703 #else
2704 cmpl IMM (FLT_MANT_DIG+2),d7
2705 #endif
2706 bge Laddsf$a$small
2707 #ifndef __mcoldfire__
2708 cmpw IMM (16),d7 | if difference >= 16 swap
2709 #else
2710 cmpl IMM (16),d7 | if difference >= 16 swap
2711 #endif
2712 bge 8f
6: | (restored)
2714 #ifndef __mcoldfire__
2715 subw IMM (1),d7
2716 #else
2717 subl IMM (1),d7
2718 #endif
7: | (restored) top of the per-bit shift loop for a
2720 #ifndef __mcoldfire__
2721 lsrl IMM (1),d0 | shift right first operand
2722 roxrl IMM (1),d1
2723 dbra d7,7b
2724 #else
2725 lsrl IMM (1),d1
2726 btst IMM (0),d0
2727 beq 10f
2728 bset IMM (31),d1
2729 10: lsrl IMM (1),d0
2730 subql IMM (1),d7
2731 bpl 7b
2732 #endif
2733 bra Laddsf$3
8: | (restored) difference >= 16: move a down a whole word at a time
2735 movew d0,d1
2736 swap d1
2737 movew d1,d0
2738 swap d0
2739 #ifndef __mcoldfire__
2740 subw IMM (16),d7
2741 #else
2742 subl IMM (16),d7
2743 #endif
2744 bne 6b | if still more bits, go back to normal case
2745 | otherwise we fall through
2747 | Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
2748 | signs are stored in a0 and a1).
| NOTE(review): the two numeric "1:" labels restored below were missing
| from this copy of the file (targets of "beq 1f" / "bhi 1f") -- confirm
| against the original source.
2750 Laddsf$3:
2751 | Here we have to decide whether to add or subtract the numbers
2752 #ifndef __mcoldfire__
2753 exg d6,a0 | get signs back
2754 exg d7,a1 | and save the exponents
2755 #else
2756 movel d6,d4
2757 movel a0,d6
2758 movel d4,a0
2759 movel d7,d4
2760 movel a1,d7
2761 movel d4,a1
2762 #endif
2763 eorl d6,d7 | combine sign bits
2764 bmi Lsubsf$0 | if negative a and b have opposite
2765 | sign so we actually subtract the
2766 | numbers
2768 | Here we have both positive or both negative
2769 #ifndef __mcoldfire__
2770 exg d6,a0 | now we have the exponent in d6
2771 #else
2772 movel d6,d4
2773 movel a0,d6
2774 movel d4,a0
2775 #endif
2776 movel a0,d7 | and sign in d7
2777 andl IMM (0x80000000),d7
2778 | Here we do the addition.
2779 addl d3,d1
2780 addxl d2,d0
2781 | Note: now we have d2, d3, d4 and d5 to play with!
2783 | Put the exponent, in the first byte, in d2, to use the "standard" rounding
2784 | routines:
2785 movel d6,d2
2786 #ifndef __mcoldfire__
2787 lsrw IMM (8),d2
2788 #else
2789 lsrl IMM (8),d2
2790 #endif
2792 | Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
2793 | the case of denormalized numbers in the rounding routine itself).
2794 | As in the addition (not in the subtraction!) we could have set
2795 | one more bit we check this:
2796 btst IMM (FLT_MANT_DIG+1),d0
2797 beq 1f
2798 #ifndef __mcoldfire__
2799 lsrl IMM (1),d0
2800 roxrl IMM (1),d1
2801 #else
2802 lsrl IMM (1),d1
2803 btst IMM (0),d0
2804 beq 10f
2805 bset IMM (31),d1
2806 10: lsrl IMM (1),d0
2807 #endif
2808 addl IMM (1),d2
1: | (restored label: no carry out of the top fraction bit)
2810 lea pc@(Laddsf$4),a0 | to return from rounding routine
2811 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2812 #ifdef __mcoldfire__
2813 clrl d6
2814 #endif
2815 movew a1@(6),d6 | rounding mode in d6
2816 beq Lround$to$nearest
2817 #ifndef __mcoldfire__
2818 cmpw IMM (ROUND_TO_PLUS),d6
2819 #else
2820 cmpl IMM (ROUND_TO_PLUS),d6
2821 #endif
2822 bhi Lround$to$minus
2823 blt Lround$to$zero
2824 bra Lround$to$plus
2825 Laddsf$4:
2826 | Put back the exponent, but check for overflow.
2827 #ifndef __mcoldfire__
2828 cmpw IMM (0xff),d2
2829 #else
2830 cmpl IMM (0xff),d2
2831 #endif
2832 bhi 1f
2833 bclr IMM (FLT_MANT_DIG-1),d0
2834 #ifndef __mcoldfire__
2835 lslw IMM (7),d2
2836 #else
2837 lsll IMM (7),d2
2838 #endif
2839 swap d2
2840 orl d2,d0
2841 bra Laddsf$ret
1: | (restored label: exponent exceeded 0xff -- overflow)
2843 movew IMM (ADD),d5
2844 bra Lf$overflow
2846 Lsubsf$0:
2847 | We are here if a > 0 and b < 0 (sign bits cleared).
2848 | Here we do the subtraction.
2849 movel d6,d7 | put sign in d7
2850 andl IMM (0x80000000),d7
2852 subl d3,d1 | result in d0-d1
2853 subxl d2,d0 |
2854 beq Laddsf$ret | if zero just exit
2855 bpl 1f | if positive skip the following
2856 bchg IMM (31),d7 | change sign bit in d7
2857 negl d1
2858 negxl d0
1: | (restored label, missing from this copy: result already positive)
2860 #ifndef __mcoldfire__
2861 exg d2,a0 | now we have the exponent in d2
2862 lsrw IMM (8),d2 | put it in the first byte
2863 #else
2864 movel d2,d4
2865 movel a0,d2
2866 movel d4,a0
2867 lsrl IMM (8),d2 | put it in the first byte
2868 #endif
2870 | Now d0-d1 is positive and the sign bit is in d7.
2872 | Note that we do not have to normalize, since in the subtraction bit
2873 | #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
2874 | the rounding routines themselves.
2875 lea pc@(Lsubsf$1),a0 | to return from rounding routine
2876 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2877 #ifdef __mcoldfire__
2878 clrl d6
2879 #endif
2880 movew a1@(6),d6 | rounding mode in d6
2881 beq Lround$to$nearest
2882 #ifndef __mcoldfire__
2883 cmpw IMM (ROUND_TO_PLUS),d6
2884 #else
2885 cmpl IMM (ROUND_TO_PLUS),d6
2886 #endif
2887 bhi Lround$to$minus
2888 blt Lround$to$zero
2889 bra Lround$to$plus
2890 Lsubsf$1:
2891 | Put back the exponent (we can't have overflow!). '
2892 bclr IMM (FLT_MANT_DIG-1),d0
2893 #ifndef __mcoldfire__
2894 lslw IMM (7),d2
2895 #else
2896 lsll IMM (7),d2
2897 #endif
2898 swap d2
2899 orl d2,d0
2900 bra Laddsf$ret
2902 | If one of the numbers was too small (difference of exponents >=
2903 | FLT_MANT_DIG+2) we return the other (and now we don't have to '
2904 | check for finiteness or zero).
| NOTE(review): the "rts" after each "unlk a6" below was missing from this
| copy of the file (compare the Lmulsf$a$0 exit further down, where it
| survived); without it control would fall off into the next routine.
| Restored.
2905 Laddsf$a$small:
2906 movel a6@(12),d0
2907 PICLEA SYM (_fpCCR),a0
2908 movew IMM (0),a0@
2909 #ifndef __mcoldfire__
2910 moveml sp@+,d2-d7 | restore data registers
2911 #else
2912 moveml sp@,d2-d7
2913 | XXX if frame pointer is ever removed, stack pointer must
2914 | be adjusted here.
2915 #endif
2916 unlk a6 | and return
rts
2919 Laddsf$b$small:
2920 movel a6@(8),d0
2921 PICLEA SYM (_fpCCR),a0
2922 movew IMM (0),a0@
2923 #ifndef __mcoldfire__
2924 moveml sp@+,d2-d7 | restore data registers
2925 #else
2926 moveml sp@,d2-d7
2927 | XXX if frame pointer is ever removed, stack pointer must
2928 | be adjusted here.
2929 #endif
2930 unlk a6 | and return
rts
2933 | If the numbers are denormalized remember to put exponent equal to 1.
2935 Laddsf$a$den:
| Denormalized a: substitute an exponent of 1, already in the position in
| which this code keeps exponents (d5 = 0x01000000, then swapped), and
| continue without setting the hidden bit.
2936 movel d5,d6 | d5 contains 0x01000000
2937 swap d6
2938 bra Laddsf$1
2940 Laddsf$b$den:
| Same for b; also redo the notl of d4 that the early branch skipped.
2941 movel d5,d7
2942 swap d7
2943 notl d4 | make d4 into a mask for the fraction
2944 | (this was not executed after the jump)
2945 bra Laddsf$2
2947 | The rest is mainly code for the different results which can be
2948 | returned (checking always for +/-INFINITY and NaN).
| NOTE(review): the local labels "1:"/"2:" and the final "rts" restored
| below were missing from this copy of the file -- confirm against the
| original source.
2950 Laddsf$b:
2951 | Return b (if a is zero).
2952 movel a6@(12),d0
2953 bra 1f
2954 Laddsf$a:
2955 | Return a (if b is zero).
2956 movel a6@(8),d0
1: | (restored: common checks for the returned operand)
2958 movew IMM (ADD),d5
2959 | We have to check for NaN and +/-infty.
2960 movel d0,d7
2961 andl IMM (0x80000000),d7 | put sign in d7
2962 bclr IMM (31),d0 | clear sign
2963 cmpl IMM (INFINITY),d0 | check for infty or NaN
2964 bge 2f
2965 movel d0,d0 | check for zero (we do this because we don't '
2966 bne Laddsf$ret | want to return -0 by mistake
2967 bclr IMM (31),d7 | if zero be sure to clear sign
2968 bra Laddsf$ret | if everything OK just return
2: | (restored)
2970 | The value to be returned is either +/-infty or NaN
2971 andl IMM (0x007fffff),d0 | check for NaN
2972 bne Lf$inop | if mantissa not zero is NaN
2973 bra Lf$infty
2975 Laddsf$ret:
2976 | Normal exit (a and b nonzero, result is not NaN nor +/-infty).
2977 | We have to clear the exception flags (just the exception type).
2978 PICLEA SYM (_fpCCR),a0
2979 movew IMM (0),a0@
2980 orl d7,d0 | put sign bit
2981 #ifndef __mcoldfire__
2982 moveml sp@+,d2-d7 | restore data registers
2983 #else
2984 moveml sp@,d2-d7
2985 | XXX if frame pointer is ever removed, stack pointer must
2986 | be adjusted here.
2987 #endif
2988 unlk a6 | and return
rts
2991 Laddsf$ret$den:
2992 | Return a denormalized number (for addition we don't signal underflow) '
2993 lsrl IMM (1),d0 | remember to shift right back once
2994 bra Laddsf$ret | and return
2996 | Note: when adding two floats of the same sign if either one is
2997 | NaN we return NaN without regard to whether the other is finite or
2998 | not. When subtracting them (i.e., when adding two numbers of
2999 | opposite signs) things are more complicated: if both are INFINITY
3000 | we return NaN, if only one is INFINITY and the other is NaN we return
3001 | NaN, but if it is finite we return INFINITY with the corresponding sign.
| NOTE(review): the "1:" label restored below (target of "bmi 1f") was
| missing from this copy of the file -- confirm against the original.
3003 Laddsf$nf:
3004 movew IMM (ADD),d5
3005 | This could be faster but it is not worth the effort, since it is not
3006 | executed very often. We sacrifice speed for clarity here.
3007 movel a6@(8),d0 | get the numbers back (remember that we
3008 movel a6@(12),d1 | did some processing already)
3009 movel IMM (INFINITY),d4 | useful constant (INFINITY)
3010 movel d0,d2 | save sign bits
3011 movel d1,d3
3012 bclr IMM (31),d0 | clear sign bits
3013 bclr IMM (31),d1
3014 | We know that one of them is either NaN or +/-INFINITY
3015 | Check for NaN (if either one is NaN return NaN)
3016 cmpl d4,d0 | check first a (d0)
3017 bhi Lf$inop
3018 cmpl d4,d1 | check now b (d1)
3019 bhi Lf$inop
3020 | Now comes the check for +/-INFINITY. We know that both are (maybe not
3021 | finite) numbers, but we have to check if both are infinite whether we
3022 | are adding or subtracting them.
3023 eorl d3,d2 | to check sign bits
3024 bmi 1f
3025 movel d0,d7
3026 andl IMM (0x80000000),d7 | get (common) sign bit
3027 bra Lf$infty
1: | (restored: opposite signs -- effectively a subtraction of infinities)
3029 | We know one (or both) are infinite, so we test for equality between the
3030 | two numbers (if they are equal they have to be infinite both, so we
3031 | return NaN).
3032 cmpl d1,d0 | are both infinite?
3033 beq Lf$inop | if so return NaN
3035 movel d0,d7
3036 andl IMM (0x80000000),d7 | get a's sign bit '
3037 cmpl d4,d0 | test now for infinity
3038 beq Lf$infty | if a is INFINITY return with this sign
3039 bchg IMM (31),d7 | else we know b is INFINITY and has
3040 bra Lf$infty | the opposite sign
3042 |=============================================================================
3043 | __mulsf3
3044 |=============================================================================
| ---------------------------------------------------------------------------
| float __mulsf3(float a, float b);
| Shift-and-add multiplication of the 24-bit fractions; exponents are
| added and rebiased, the product is normalized so bit FLT_MANT_DIG is
| set, and the shared tail Lround$exit (also used by __divsf3) does the
| rounding and final packing.  d7 carries the sign of the product.
| NOTE(review): the local label "2:" restored in the multiply loop below
| (target of "bcc 2f") was missing from this copy of the file -- confirm
| against the original source.
| ---------------------------------------------------------------------------
3046 | float __mulsf3(float, float);
3047 SYM (__mulsf3):
3048 #ifndef __mcoldfire__
3049 link a6,IMM (0)
3050 moveml d2-d7,sp@-
3051 #else
3052 link a6,IMM (-24)
3053 moveml d2-d7,sp@
3054 #endif
3055 movel a6@(8),d0 | get a into d0
3056 movel a6@(12),d1 | and b into d1
3057 movel d0,d7 | d7 will hold the sign of the product
3058 eorl d1,d7 |
3059 andl IMM (0x80000000),d7
3060 movel IMM (INFINITY),d6 | useful constant (+INFINITY)
3061 movel d6,d5 | another (mask for fraction)
3062 notl d5 |
3063 movel IMM (0x00800000),d4 | this is to put hidden bit back
3064 bclr IMM (31),d0 | get rid of a's sign bit '
3065 movel d0,d2 |
3066 beq Lmulsf$a$0 | branch if a is zero
3067 bclr IMM (31),d1 | get rid of b's sign bit '
3068 movel d1,d3 |
3069 beq Lmulsf$b$0 | branch if b is zero
3070 cmpl d6,d0 | is a big?
3071 bhi Lmulsf$inop | if a is NaN return NaN
3072 beq Lmulsf$inf | if a is INFINITY we have to check b
3073 cmpl d6,d1 | now compare b with INFINITY
3074 bhi Lmulsf$inop | is b NaN?
3075 beq Lmulsf$overflow | is b INFINITY?
3076 | Here we have both numbers finite and nonzero (and with no sign bit).
3077 | Now we get the exponents into d2 and d3.
3078 andl d6,d2 | and isolate exponent in d2
3079 beq Lmulsf$a$den | if exponent is zero we have a denormalized
3080 andl d5,d0 | and isolate fraction
3081 orl d4,d0 | and put hidden bit back
3082 swap d2 | I like exponents in the first byte
3083 #ifndef __mcoldfire__
3084 lsrw IMM (7),d2 |
3085 #else
3086 lsrl IMM (7),d2 |
3087 #endif
3088 Lmulsf$1: | number
3089 andl d6,d3 |
3090 beq Lmulsf$b$den |
3091 andl d5,d1 |
3092 orl d4,d1 |
3093 swap d3 |
3094 #ifndef __mcoldfire__
3095 lsrw IMM (7),d3 |
3096 #else
3097 lsrl IMM (7),d3 |
3098 #endif
3099 Lmulsf$2: |
3100 #ifndef __mcoldfire__
3101 addw d3,d2 | add exponents
3102 subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
3103 #else
3104 addl d3,d2 | add exponents
3105 subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
3106 #endif
3108 | We are now ready to do the multiplication. The situation is as follows:
3109 | both a and b have bit FLT_MANT_DIG-1 set (even if they were
3110 | denormalized to start with!), which means that in the product
3111 | bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
3112 | high long) is set.
3114 | To do the multiplication let us move the number a little bit around ...
3115 movel d1,d6 | second operand in d6
3116 movel d0,d5 | first operand in d4-d5
3117 movel IMM (0),d4
3118 movel d4,d1 | the sums will go in d0-d1
3119 movel d4,d0
3121 | now bit FLT_MANT_DIG-1 becomes bit 31:
3122 lsll IMM (31-FLT_MANT_DIG+1),d6
3124 | Start the loop (we loop #FLT_MANT_DIG times):
3125 movew IMM (FLT_MANT_DIG-1),d3
3126 1: addl d1,d1 | shift sum
3127 addxl d0,d0
3128 lsll IMM (1),d6 | get bit bn
3129 bcc 2f | if not set skip sum
3130 addl d5,d1 | add a
3131 addxl d4,d0
2: | (restored label: multiplier bit was clear, skip the add)
3133 #ifndef __mcoldfire__
3134 dbf d3,1b | loop back
3135 #else
3136 subql IMM (1),d3
3137 bpl 1b
3138 #endif
3140 | Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
3141 | (mod 32) of d0 set. The first thing to do now is to normalize it so bit
3142 | FLT_MANT_DIG is set (to do the rounding).
3143 #ifndef __mcoldfire__
3144 rorl IMM (6),d1
3145 swap d1
3146 movew d1,d3
3147 andw IMM (0x03ff),d3
3148 andw IMM (0xfd00),d1
3149 #else
3150 movel d1,d3
3151 lsll IMM (8),d1
3152 addl d1,d1
3153 addl d1,d1
3154 moveq IMM (22),d5
3155 lsrl d5,d3
3156 orl d3,d1
3157 andl IMM (0xfffffd00),d1
3158 #endif
3159 lsll IMM (8),d0
3160 addl d0,d0
3161 addl d0,d0
3162 #ifndef __mcoldfire__
3163 orw d3,d0
3164 #else
3165 orl d3,d0
3166 #endif
3168 movew IMM (MULTIPLY),d5
3170 btst IMM (FLT_MANT_DIG+1),d0
3171 beq Lround$exit
3172 #ifndef __mcoldfire__
3173 lsrl IMM (1),d0
3174 roxrl IMM (1),d1
3175 addw IMM (1),d2
3176 #else
3177 lsrl IMM (1),d1
3178 btst IMM (0),d0
3179 beq 10f
3180 bset IMM (31),d1
3181 10: lsrl IMM (1),d0
3182 addql IMM (1),d2
3183 #endif
3184 bra Lround$exit
| Error exits for __mulsf3: d5 = MULTIPLY identifies the operation to the
| exception code; d7 already holds the sign of the product.
3186 Lmulsf$inop:
3187 movew IMM (MULTIPLY),d5
3188 bra Lf$inop
3190 Lmulsf$overflow:
3191 movew IMM (MULTIPLY),d5
3192 bra Lf$overflow
3194 Lmulsf$inf:
3195 movew IMM (MULTIPLY),d5
3196 | If either is NaN return NaN; else both are (maybe infinite) numbers, so
3197 | return INFINITY with the correct sign (which is in d7).
3198 cmpl d6,d1 | is b NaN?
3199 bhi Lf$inop | if so return NaN
3200 bra Lf$overflow | else return +/-INFINITY
3202 | If either number is zero return zero, unless the other is +/-INFINITY,
3203 | or NaN, in which case we return NaN.
3204 Lmulsf$b$0:
3205 | Here d1 (==b) is zero.
3206 movel d1,d0 | put b into d0 (just a zero)
3207 movel a6@(8),d1 | get a again to check for non-finiteness
3208 bra 1f
3209 Lmulsf$a$0:
| Here a is zero and is already in d0 (its sign bit was cleared above).
3210 movel a6@(12),d1 | get b again to check for non-finiteness
3211 1: bclr IMM (31),d1 | clear sign bit
3212 cmpl IMM (INFINITY),d1 | and check for a large exponent
3213 bge Lf$inop | if b is +/-INFINITY or NaN return NaN
3214 PICLEA SYM (_fpCCR),a0 | else return zero
3215 movew IMM (0),a0@ |
3216 #ifndef __mcoldfire__
3217 moveml sp@+,d2-d7 |
3218 #else
3219 moveml sp@,d2-d7
3220 | XXX if frame pointer is ever removed, stack pointer must
3221 | be adjusted here.
3222 #endif
3223 unlk a6 |
3224 rts |
3226 | If a number is denormalized we put an exponent of 1 but do not put the
3227 | hidden bit back into the fraction; instead we shift left until bit 23
3228 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
3229 | to ensure that the product of the fractions is close to 1.
3230 Lmulsf$a$den:
3231 movel IMM (1),d2
3232 andl d5,d0
3233 1: addl d0,d0 | shift a left (until bit 23 is set)
3234 #ifndef __mcoldfire__
3235 subw IMM (1),d2 | and adjust exponent
3236 #else
3237 subql IMM (1),d2 | and adjust exponent
3238 #endif
3239 btst IMM (FLT_MANT_DIG-1),d0
3240 bne Lmulsf$1 | rejoin the main path once normalized
3241 bra 1b | else loop back
3243 Lmulsf$b$den:
3244 movel IMM (1),d3
3245 andl d5,d1
3246 1: addl d1,d1 | shift b left until bit 23 is set
3247 #ifndef __mcoldfire__
3248 subw IMM (1),d3 | and adjust exponent
3249 #else
3250 subl IMM (1),d3 | and adjust exponent
3251 #endif
3252 btst IMM (FLT_MANT_DIG-1),d1
3253 bne Lmulsf$2 | rejoin the main path once normalized
3254 bra 1b | else loop back
3256 |=============================================================================
3257 | __divsf3
3258 |=============================================================================
| ---------------------------------------------------------------------------
| float __divsf3(float a, float b);
| Shift-and-subtract division of the fractions (FLT_MANT_DIG+2 quotient
| bits plus a sticky bit), exponents subtracted and rebiased; ends in the
| shared tail Lround$exit.  d7 carries the sign of the quotient.
| NOTE(review): the numeric local labels ("3:" twice and "1:" once)
| restored below were missing from this copy of the file -- confirm
| against the original source.
| ---------------------------------------------------------------------------
3260 | float __divsf3(float, float);
3261 SYM (__divsf3):
3262 #ifndef __mcoldfire__
3263 link a6,IMM (0)
3264 moveml d2-d7,sp@-
3265 #else
3266 link a6,IMM (-24)
3267 moveml d2-d7,sp@
3268 #endif
3269 movel a6@(8),d0 | get a into d0
3270 movel a6@(12),d1 | and b into d1
3271 movel d0,d7 | d7 will hold the sign of the result
3272 eorl d1,d7 |
3273 andl IMM (0x80000000),d7 |
3274 movel IMM (INFINITY),d6 | useful constant (+INFINITY)
3275 movel d6,d5 | another (mask for fraction)
3276 notl d5 |
3277 movel IMM (0x00800000),d4 | this is to put hidden bit back
3278 bclr IMM (31),d0 | get rid of a's sign bit '
3279 movel d0,d2 |
3280 beq Ldivsf$a$0 | branch if a is zero
3281 bclr IMM (31),d1 | get rid of b's sign bit '
3282 movel d1,d3 |
3283 beq Ldivsf$b$0 | branch if b is zero
3284 cmpl d6,d0 | is a big?
3285 bhi Ldivsf$inop | if a is NaN return NaN
3286 beq Ldivsf$inf | if a is INFINITY we have to check b
3287 cmpl d6,d1 | now compare b with INFINITY
3288 bhi Ldivsf$inop | if b is NaN return NaN
3289 beq Ldivsf$underflow
3290 | Here we have both numbers finite and nonzero (and with no sign bit).
3291 | Now we get the exponents into d2 and d3 and normalize the numbers to
3292 | ensure that the ratio of the fractions is close to 1. We do this by
3293 | making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
3294 andl d6,d2 | and isolate exponent in d2
3295 beq Ldivsf$a$den | if exponent is zero we have a denormalized
3296 andl d5,d0 | and isolate fraction
3297 orl d4,d0 | and put hidden bit back
3298 swap d2 | I like exponents in the first byte
3299 #ifndef __mcoldfire__
3300 lsrw IMM (7),d2 |
3301 #else
3302 lsrl IMM (7),d2 |
3303 #endif
3304 Ldivsf$1: |
3305 andl d6,d3 |
3306 beq Ldivsf$b$den |
3307 andl d5,d1 |
3308 orl d4,d1 |
3309 swap d3 |
3310 #ifndef __mcoldfire__
3311 lsrw IMM (7),d3 |
3312 #else
3313 lsrl IMM (7),d3 |
3314 #endif
3315 Ldivsf$2: |
3316 #ifndef __mcoldfire__
3317 subw d3,d2 | subtract exponents
3318 addw IMM (F_BIAS),d2 | and add bias
3319 #else
3320 subl d3,d2 | subtract exponents
3321 addl IMM (F_BIAS),d2 | and add bias
3322 #endif
3324 | We are now ready to do the division. We have prepared things in such a way
3325 | that the ratio of the fractions will be less than 2 but greater than 1/2.
3326 | At this point the registers in use are:
3327 | d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
3328 | d1 holds b (second operand, bit FLT_MANT_DIG=1)
3329 | d2 holds the difference of the exponents, corrected by the bias
3330 | d7 holds the sign of the ratio
3331 | d4, d5, d6 hold some constants
3332 movel d7,a0 | d6-d7 will hold the ratio of the fractions
3333 movel IMM (0),d6 |
3334 movel d6,d7
3336 movew IMM (FLT_MANT_DIG+1),d3
3337 1: cmpl d0,d1 | is a < b?
3338 bhi 2f |
3339 bset d3,d6 | set a bit in d6
3340 subl d1,d0 | if a >= b a <-- a-b
3341 beq 3f | if a is zero, exit
3342 2: addl d0,d0 | multiply a by 2
3343 #ifndef __mcoldfire__
3344 dbra d3,1b
3345 #else
3346 subql IMM (1),d3
3347 bpl 1b
3348 #endif
3: | (restored label: quotient-bit loop done, or left early with a == 0)
3350 | Now we keep going to set the sticky bit ...
3351 movew IMM (FLT_MANT_DIG),d3
3352 1: cmpl d0,d1
3353 ble 2f
3354 addl d0,d0
3355 #ifndef __mcoldfire__
3356 dbra d3,1b
3357 #else
3358 subql IMM(1),d3
3359 bpl 1b
3360 #endif
3361 movel IMM (0),d1
3362 bra 3f
3363 2: movel IMM (0),d1
3364 #ifndef __mcoldfire__
3365 subw IMM (FLT_MANT_DIG),d3
3366 addw IMM (31),d3
3367 #else
3368 subl IMM (FLT_MANT_DIG),d3
3369 addl IMM (31),d3
3370 #endif
3371 bset d3,d1
3: | (restored label: sticky bit computed)
3373 movel d6,d0 | put the ratio in d0-d1
3374 movel a0,d7 | get sign back
3376 | Because of the normalization we did before we are guaranteed that
3377 | d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
3378 | bit 25 could be set, and if it is not set then bit 24 is necessarily set.
3379 btst IMM (FLT_MANT_DIG+1),d0
3380 beq 1f | if it is not set, then bit 24 is set
3381 lsrl IMM (1),d0 |
3382 #ifndef __mcoldfire__
3383 addw IMM (1),d2 |
3384 #else
3385 addl IMM (1),d2 |
3386 #endif
1: | (restored label)
3388 | Now round, check for over- and underflow, and exit.
3389 movew IMM (DIVIDE),d5
3390 bra Lround$exit
3392 Ldivsf$inop:
3393 movew IMM (DIVIDE),d5
3394 bra Lf$inop
3396 Ldivsf$overflow:
3397 movew IMM (DIVIDE),d5
3398 bra Lf$overflow
3400 Ldivsf$underflow:
3401 movew IMM (DIVIDE),d5
3402 bra Lf$underflow
3404 Ldivsf$a$0:
3405 movew IMM (DIVIDE),d5
3406 | If a is zero check to see whether b is zero also. In that case return
3407 | NaN; then check if b is NaN, and return NaN also in that case. Else
3408 | return zero.
3409 andl IMM (0x7fffffff),d1 | clear sign bit and test b
3410 beq Lf$inop | if b is also zero return NaN
3411 cmpl IMM (INFINITY),d1 | check for NaN
3412 bhi Lf$inop |
3413 movel IMM (0),d0 | else return zero
3414 PICLEA SYM (_fpCCR),a0 |
3415 movew IMM (0),a0@ |
3416 #ifndef __mcoldfire__
3417 moveml sp@+,d2-d7 |
3418 #else
3419 moveml sp@,d2-d7 |
3420 | XXX if frame pointer is ever removed, stack pointer must
3421 | be adjusted here.
3422 #endif
3423 unlk a6 |
3424 rts |
| Handle b == 0 in __divsf3 (a is known nonzero here): NaN/0 yields NaN,
| otherwise signal DIVIDE_BY_ZERO.  Lf$inop and Lf$div$0 are the shared
| handlers defined elsewhere in this file.
3426 Ldivsf$b$0:
3427 movew IMM (DIVIDE),d5
3428 | If we got here a is not zero. Check if a is NaN; in that case return NaN,
3429 | else return +/-INFINITY. Remember that a is in d0 with the sign bit
3430 | cleared already.
3431 cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
3432 bhi Lf$inop | if larger it is NaN
3433 bra Lf$div$0 | else signal DIVIDE_BY_ZERO
| Handle a == +/-INFINITY in __divsf3: INF/INF and INF/NaN yield NaN,
| INF/finite goes through the overflow handler (which returns INFINITY).
3435 Ldivsf$inf:
3436 movew IMM (DIVIDE),d5
3437 | If a is INFINITY we have to check b
3438 cmpl IMM (INFINITY),d1 | compare b with INFINITY
3439 bge Lf$inop | if b is NaN or INFINITY return NaN
3440 bra Lf$overflow | else return overflow
3442 | If a number is denormalized we put an exponent of 1 but do not put the
3443 | bit back into the fraction.
| NOTE(review): d5 is presumably the fraction mask and Ldivsf$1 the
| re-entry point into the main divide path — both are set up before this
| excerpt; confirm against the full routine.
3444 Ldivsf$a$den:
3445 movel IMM (1),d2
3446 andl d5,d0
3447 1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
3448 #ifndef __mcoldfire__
3449 subw IMM (1),d2 | and adjust exponent
3450 #else
3451 subl IMM (1),d2 | and adjust exponent
3452 #endif
3453 btst IMM (FLT_MANT_DIG-1),d0
3454 bne Ldivsf$1
3455 bra 1b
| Same normalization as Ldivsf$a$den, but for the divisor b (in d1);
| rejoins the main divide path at Ldivsf$2 (defined before this excerpt).
3457 Ldivsf$b$den:
3458 movel IMM (1),d3
3459 andl d5,d1
3460 1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
3461 #ifndef __mcoldfire__
3462 subw IMM (1),d3 | and adjust exponent
3463 #else
3464 subl IMM (1),d3 | and adjust exponent
3465 #endif
3466 btst IMM (FLT_MANT_DIG-1),d1
3467 bne Ldivsf$2
3468 bra 1b
3470 Lround$exit:
3471 | This is a common exit point for __mulsf3 and __divsf3.
| On entry: fraction in d0-d1, exponent in d2, sign in d7, operation
| code in d5.  Rounds, checks over/underflow, rebuilds the float in d0.
3473 | First check for underflow in the exponent:
3474 #ifndef __mcoldfire__
3475 cmpw IMM (-FLT_MANT_DIG-1),d2
3476 #else
3477 cmpl IMM (-FLT_MANT_DIG-1),d2
3478 #endif
3479 blt Lf$underflow
3480 | It could happen that the exponent is less than 1, in which case the
3481 | number is denormalized. In this case we shift right and adjust the
3482 | exponent until it becomes 1 or the fraction is zero (in the latter case
3483 | we signal underflow and return zero).
3484 movel IMM (0),d6 | d6 is used temporarily
3485 #ifndef __mcoldfire__
3486 cmpw IMM (1),d2 | if the exponent is less than 1 we
3487 #else
3488 cmpl IMM (1),d2 | if the exponent is less than 1 we
3489 #endif
3490 bge 2f | have to shift right (denormalize)
3491 1: | restored: head of denormalizing loop, target of 'bra 1b' below (line lost in transcription)
3492 #ifndef __mcoldfire__
3493 addw IMM (1),d2 | adjust the exponent
3494 lsrl IMM (1),d0 | shift right once
3495 roxrl IMM (1),d1 |
3496 roxrl IMM (1),d6 | d6 collect bits we would lose otherwise
3497 cmpw IMM (1),d2 | is the exponent 1 already?
3498 #else
3499 addql IMM (1),d2 | adjust the exponent
3500 lsrl IMM (1),d6
3501 btst IMM (0),d1
3502 beq 11f
3503 bset IMM (31),d6
3504 11: lsrl IMM (1),d1
3505 btst IMM (0),d0
3506 beq 10f
3507 bset IMM (31),d1
3508 10: lsrl IMM (1),d0
3509 cmpl IMM (1),d2 | is the exponent 1 already?
3510 #endif
3511 beq 2f | if not loop back
3512 bra 1b |
3513 bra Lf$underflow | safety check, shouldn't execute '
3514 2: orl d6,d1 | this is a trick so we don't lose '
3515 | the extra bits which were flushed right
3516 | Now call the rounding routine (which takes care of denormalized numbers):
3517 lea pc@(Lround$0),a0 | to return from rounding routine
3518 PICLEA SYM (_fpCCR),a1 | check the rounding mode
3519 #ifdef __mcoldfire__
3520 clrl d6
3521 #endif
3522 movew a1@(6),d6 | rounding mode in d6
3523 beq Lround$to$nearest
3524 #ifndef __mcoldfire__
3525 cmpw IMM (ROUND_TO_PLUS),d6
3526 #else
3527 cmpl IMM (ROUND_TO_PLUS),d6
3528 #endif
3529 bhi Lround$to$minus
3530 blt Lround$to$zero
3531 bra Lround$to$plus
3532 Lround$0:
3533 | Here we have a correctly rounded result (either normalized or denormalized).
3535 | Here we should have either a normalized number or a denormalized one, and
3536 | the exponent is necessarily larger or equal to 1 (so we don't have to '
3537 | check again for underflow!). We have to check for overflow or for a
3538 | denormalized number (which also signals underflow).
3539 | Check for overflow (i.e., exponent >= 255).
3540 #ifndef __mcoldfire__
3541 cmpw IMM (0x00ff),d2
3542 #else
3543 cmpl IMM (0x00ff),d2
3544 #endif
3545 bge Lf$overflow
3546 | Now check for a denormalized number (exponent==0).
3547 movew d2,d2
3548 beq Lf$den
3550 | Put back the exponents and sign and return.
3551 #ifndef __mcoldfire__
3552 lslw IMM (7),d2 | exponent back to fourth byte
3553 #else
3554 lsll IMM (7),d2 | exponent back to fourth byte
3555 #endif
3556 bclr IMM (FLT_MANT_DIG-1),d0
3557 swap d0 | and put back exponent
3558 #ifndef __mcoldfire__
3559 orw d2,d0 |
3560 #else
3561 orl d2,d0
3562 #endif
3563 swap d0 |
3564 orl d7,d0 | and sign also
3566 PICLEA SYM (_fpCCR),a0
3567 movew IMM (0),a0@
3568 #ifndef __mcoldfire__
3569 moveml sp@+,d2-d7
3570 #else
3571 moveml sp@,d2-d7
3572 | XXX if frame pointer is ever removed, stack pointer must
3573 | be adjusted here.
3574 #endif
3575 unlk a6
3576 rts | restored: return to caller (line lost in transcription)
3578 |=============================================================================
3579 | __negsf2
3580 |=============================================================================
3582 | This is trivial and could be shorter if we didn't bother checking for NaN '
3583 | and +/-INFINITY.
3585 | float __negsf2(float);
| Flips the sign bit; NaN input goes to the shared Lf$inop handler,
| +/-INFINITY is returned with flipped sign via Lf$infty, and +/-0
| returns +0.  Clears the exception word at _fpCCR on the normal path.
3586 SYM (__negsf2):
3587 #ifndef __mcoldfire__
3588 link a6,IMM (0)
3589 moveml d2-d7,sp@-
3590 #else
3591 link a6,IMM (-24)
3592 moveml d2-d7,sp@
3593 #endif
3594 movew IMM (NEGATE),d5
3595 movel a6@(8),d0 | get number to negate in d0
3596 bchg IMM (31),d0 | negate
3597 movel d0,d1 | make a positive copy
3598 bclr IMM (31),d1 |
3599 tstl d1 | check for zero
3600 beq 2f | if zero (either sign) return +zero
3601 cmpl IMM (INFINITY),d1 | compare to +INFINITY
3602 blt 1f |
3603 bhi Lf$inop | if larger (fraction not zero) is NaN
3604 movel d0,d7 | else get sign and return INFINITY
3605 andl IMM (0x80000000),d7
3606 bra Lf$infty
3607 1: PICLEA SYM (_fpCCR),a0
3608 movew IMM (0),a0@
3609 #ifndef __mcoldfire__
3610 moveml sp@+,d2-d7
3611 #else
3612 moveml sp@,d2-d7
3613 | XXX if frame pointer is ever removed, stack pointer must
3614 | be adjusted here.
3615 #endif
3616 unlk a6
3617 rts | restored: return to caller (line lost in transcription)
3618 2: bclr IMM (31),d0
3619 bra 1b
3621 |=============================================================================
3622 | __cmpsf2
3623 |=============================================================================
| Return values follow the libgcc comparison convention:
3625 GREATER = 1
3626 LESS = -1
3627 EQUAL = 0
3629 | int __cmpsf2(float, float);
| Compares two floats by their bit patterns after handling signs, zeros
| and NaN (NaN goes to the shared Lf$inop handler).  Result in d0.
3630 SYM (__cmpsf2):
3631 #ifndef __mcoldfire__
3632 link a6,IMM (0)
3633 moveml d2-d7,sp@- | save registers
3634 #else
3635 link a6,IMM (-24)
3636 moveml d2-d7,sp@
3637 #endif
3638 movew IMM (COMPARE),d5
3639 movel a6@(8),d0 | get first operand
3640 movel a6@(12),d1 | get second operand
3641 | Check if either is NaN, and in that case return garbage and signal
3642 | INVALID_OPERATION. Check also if either is zero, and clear the signs
3643 | if necessary.
3644 movel d0,d6
3645 andl IMM (0x7fffffff),d0
3646 beq Lcmpsf$a$0
3647 cmpl IMM (0x7f800000),d0
3648 bhi Lf$inop
3649 Lcmpsf$1:
3650 movel d1,d7
3651 andl IMM (0x7fffffff),d1
3652 beq Lcmpsf$b$0
3653 cmpl IMM (0x7f800000),d1
3654 bhi Lf$inop
3655 Lcmpsf$2:
3656 | Check the signs
3657 eorl d6,d7
3658 bpl 1f
3659 | If the signs are not equal check if a >= 0
3660 tstl d6
3661 bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
3662 bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
3663 1: | restored: same-sign path, target of 'bpl 1f' above (line lost in transcription)
3664 | If the signs are equal check for < 0
3665 tstl d6
3666 bpl 1f
3667 | If both are negative exchange them
3668 #ifndef __mcoldfire__
3669 exg d0,d1
3670 #else
3671 movel d0,d7
3672 movel d1,d0
3673 movel d7,d1
3674 #endif
3675 1: | restored: join point, target of 'bpl 1f' above (line lost in transcription)
3676 | Now that they are positive we just compare them as longs (does this also
3677 | work for denormalized numbers?).
3678 cmpl d0,d1
3679 bhi Lcmpsf$b$gt$a | |b| > |a|
3680 bne Lcmpsf$a$gt$b | |b| < |a|
3681 | If we got here a == b.
3682 movel IMM (EQUAL),d0
3683 #ifndef __mcoldfire__
3684 moveml sp@+,d2-d7 | put back the registers
3685 #else
3686 moveml sp@,d2-d7
3687 #endif
3688 unlk a6
3689 rts | restored: return to caller (line lost in transcription)
3690 Lcmpsf$a$gt$b:
3691 movel IMM (GREATER),d0
3692 #ifndef __mcoldfire__
3693 moveml sp@+,d2-d7 | put back the registers
3694 #else
3695 moveml sp@,d2-d7
3696 | XXX if frame pointer is ever removed, stack pointer must
3697 | be adjusted here.
3698 #endif
3699 unlk a6
3700 rts | restored: return to caller (line lost in transcription)
3701 Lcmpsf$b$gt$a:
3702 movel IMM (LESS),d0
3703 #ifndef __mcoldfire__
3704 moveml sp@+,d2-d7 | put back the registers
3705 #else
3706 moveml sp@,d2-d7
3707 | XXX if frame pointer is ever removed, stack pointer must
3708 | be adjusted here.
3709 #endif
3710 unlk a6
3711 rts | restored: return to caller (line lost in transcription)
| Zero operands: clear the (irrelevant) sign of -0 so the sign logic
| above treats +0 and -0 as equal, then rejoin the main path.
3713 Lcmpsf$a$0:
3714 bclr IMM (31),d6
3715 bra Lcmpsf$1
3716 Lcmpsf$b$0:
3717 bclr IMM (31),d7
3718 bra Lcmpsf$2
3720 |=============================================================================
3721 | rounding routines
3722 |=============================================================================
3724 | The rounding routines expect the number to be normalized in registers
3725 | d0-d1, with the exponent in register d2. They assume that the
3726 | exponent is larger or equal to 1. They return a properly normalized number
3727 | if possible, and a denormalized number otherwise. The exponent is returned
3728 | in d2.  They return through the address the caller placed in a0.
3730 Lround$to$nearest:
3731 | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
3732 | Here we assume that the exponent is not too small (this should be checked
3733 | before entering the rounding routine), but the number could be denormalized.
3735 | Check for denormalized numbers:
3736 1: btst IMM (FLT_MANT_DIG),d0
3737 bne 2f | if set the number is normalized
3738 | Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
3739 | is one (remember that a denormalized number corresponds to an
3740 | exponent of -F_BIAS+1).
3741 #ifndef __mcoldfire__
3742 cmpw IMM (1),d2 | remember that the exponent is at least one
3743 #else
3744 cmpl IMM (1),d2 | remember that the exponent is at least one
3745 #endif
3746 beq 2f | an exponent of one means denormalized
3747 addl d1,d1 | else shift and adjust the exponent
3748 addxl d0,d0 |
3749 #ifndef __mcoldfire__
3750 dbra d2,1b |
3751 #else
3752 subql IMM (1),d2
3753 bpl 1b
3754 #endif
3756 | Now round: we do it as follows: after the shifting we can write the
3757 | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
3758 | If delta < 1, do nothing. If delta > 1, add 1 to f.
3759 | If delta == 1, we make sure the rounded number will be even (odd?)
3760 | (after shifting).
3761 btst IMM (0),d0 | is delta < 1?
3762 beq 2f | if so, do not do anything
3763 tstl d1 | is delta == 1?
3764 bne 1f | if so round to even
3765 movel d0,d1 |
3766 andl IMM (2),d1 | bit 1 is the last significant bit
3767 addl d1,d0 |
3768 bra 2f |
3769 1: movel IMM (1),d1 | else add 1
3770 addl d1,d0 |
3771 | Shift right once (because we used bit #FLT_MANT_DIG!).
3772 2: lsrl IMM (1),d0
3773 | Now check again bit #FLT_MANT_DIG (rounding could have produced a
3774 | 'fraction overflow' ...).
3775 btst IMM (FLT_MANT_DIG),d0
3776 beq 1f
3777 lsrl IMM (1),d0
3778 #ifndef __mcoldfire__
3779 addw IMM (1),d2
3780 #else
3781 addql IMM (1),d2
3782 #endif
3783 1: | restored: join point for the 'beq 1f' above (line lost in transcription)
3784 | If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
3785 | have to put the exponent to zero and return a denormalized number.
3786 btst IMM (FLT_MANT_DIG-1),d0
3787 beq 1f
3788 jmp a0@
3789 1: movel IMM (0),d2
3790 jmp a0@
| Truncating modes: the extra bits are simply dropped (no-ops here).
3792 Lround$to$zero:
3793 Lround$to$plus:
3794 Lround$to$minus:
3795 jmp a0@
3796 #endif /* L_float */
3798 | gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
3799 | __ledf2, __ltdf2 to all return the same value as a direct call to
3800 | __cmpdf2 would. In this implementation, each of these routines
3801 | simply calls __cmpdf2. It would be more efficient to give the
3802 | __cmpdf2 routine several names, but separating them out will make it
3803 | easier to write efficient versions of these routines someday.
3805 #ifdef L_eqdf2
3806 .text
3807 .proc
3808 .globl SYM (__eqdf2)
| int __eqdf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3809 SYM (__eqdf2):
3810 link a6,IMM (0)
3811 movl a6@(20),sp@-
3812 movl a6@(16),sp@-
3813 movl a6@(12),sp@-
3814 movl a6@(8),sp@-
3815 PICCALL SYM (__cmpdf2)
3816 unlk a6
3817 rts | restored: return to caller (line lost in transcription)
3818 #endif /* L_eqdf2 */
3820 #ifdef L_nedf2
3821 .text
3822 .proc
3823 .globl SYM (__nedf2)
| int __nedf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3824 SYM (__nedf2):
3825 link a6,IMM (0)
3826 movl a6@(20),sp@-
3827 movl a6@(16),sp@-
3828 movl a6@(12),sp@-
3829 movl a6@(8),sp@-
3830 PICCALL SYM (__cmpdf2)
3831 unlk a6
3832 rts | restored: return to caller (line lost in transcription)
3833 #endif /* L_nedf2 */
3835 #ifdef L_gtdf2
3836 .text
3837 .proc
3838 .globl SYM (__gtdf2)
| int __gtdf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3839 SYM (__gtdf2):
3840 link a6,IMM (0)
3841 movl a6@(20),sp@-
3842 movl a6@(16),sp@-
3843 movl a6@(12),sp@-
3844 movl a6@(8),sp@-
3845 PICCALL SYM (__cmpdf2)
3846 unlk a6
3847 rts | restored: return to caller (line lost in transcription)
3848 #endif /* L_gtdf2 */
3850 #ifdef L_gedf2
3851 .text
3852 .proc
3853 .globl SYM (__gedf2)
| int __gedf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3854 SYM (__gedf2):
3855 link a6,IMM (0)
3856 movl a6@(20),sp@-
3857 movl a6@(16),sp@-
3858 movl a6@(12),sp@-
3859 movl a6@(8),sp@-
3860 PICCALL SYM (__cmpdf2)
3861 unlk a6
3862 rts | restored: return to caller (line lost in transcription)
3863 #endif /* L_gedf2 */
3865 #ifdef L_ltdf2
3866 .text
3867 .proc
3868 .globl SYM (__ltdf2)
| int __ltdf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3869 SYM (__ltdf2):
3870 link a6,IMM (0)
3871 movl a6@(20),sp@-
3872 movl a6@(16),sp@-
3873 movl a6@(12),sp@-
3874 movl a6@(8),sp@-
3875 PICCALL SYM (__cmpdf2)
3876 unlk a6
3877 rts | restored: return to caller (line lost in transcription)
3878 #endif /* L_ltdf2 */
3880 #ifdef L_ledf2
3881 .text
3882 .proc
3883 .globl SYM (__ledf2)
| int __ledf2(double a, double b): push the four argument words and call
| __cmpdf2; its result is returned unchanged in d0.
3884 SYM (__ledf2):
3885 link a6,IMM (0)
3886 movl a6@(20),sp@-
3887 movl a6@(16),sp@-
3888 movl a6@(12),sp@-
3889 movl a6@(8),sp@-
3890 PICCALL SYM (__cmpdf2)
3891 unlk a6
3892 rts | restored: return to caller (line lost in transcription)
3893 #endif /* L_ledf2 */
3895 | The comments above about __eqdf2, et al., also apply to __eqsf2,
3896 | et al., except that the latter call __cmpsf2 rather than __cmpdf2.
3898 #ifdef L_eqsf2
3899 .text
3900 .proc
3901 .globl SYM (__eqsf2)
| int __eqsf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3902 SYM (__eqsf2):
3903 link a6,IMM (0)
3904 movl a6@(12),sp@-
3905 movl a6@(8),sp@-
3906 PICCALL SYM (__cmpsf2)
3907 unlk a6
3908 rts | restored: return to caller (line lost in transcription)
3909 #endif /* L_eqsf2 */
3911 #ifdef L_nesf2
3912 .text
3913 .proc
3914 .globl SYM (__nesf2)
| int __nesf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3915 SYM (__nesf2):
3916 link a6,IMM (0)
3917 movl a6@(12),sp@-
3918 movl a6@(8),sp@-
3919 PICCALL SYM (__cmpsf2)
3920 unlk a6
3921 rts | restored: return to caller (line lost in transcription)
3922 #endif /* L_nesf2 */
3924 #ifdef L_gtsf2
3925 .text
3926 .proc
3927 .globl SYM (__gtsf2)
| int __gtsf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3928 SYM (__gtsf2):
3929 link a6,IMM (0)
3930 movl a6@(12),sp@-
3931 movl a6@(8),sp@-
3932 PICCALL SYM (__cmpsf2)
3933 unlk a6
3934 rts | restored: return to caller (line lost in transcription)
3935 #endif /* L_gtsf2 */
3937 #ifdef L_gesf2
3938 .text
3939 .proc
3940 .globl SYM (__gesf2)
| int __gesf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3941 SYM (__gesf2):
3942 link a6,IMM (0)
3943 movl a6@(12),sp@-
3944 movl a6@(8),sp@-
3945 PICCALL SYM (__cmpsf2)
3946 unlk a6
3947 rts | restored: return to caller (line lost in transcription)
3948 #endif /* L_gesf2 */
3950 #ifdef L_ltsf2
3951 .text
3952 .proc
3953 .globl SYM (__ltsf2)
| int __ltsf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3954 SYM (__ltsf2):
3955 link a6,IMM (0)
3956 movl a6@(12),sp@-
3957 movl a6@(8),sp@-
3958 PICCALL SYM (__cmpsf2)
3959 unlk a6
3960 rts | restored: return to caller (line lost in transcription)
3961 #endif /* L_ltsf2 */
3963 #ifdef L_lesf2
3964 .text
3965 .proc
3966 .globl SYM (__lesf2)
| int __lesf2(float a, float b): push both arguments and call __cmpsf2;
| its result is returned unchanged in d0.
3967 SYM (__lesf2):
3968 link a6,IMM (0)
3969 movl a6@(12),sp@-
3970 movl a6@(8),sp@-
3971 PICCALL SYM (__cmpsf2)
3972 unlk a6
3973 rts | restored: return to caller (line lost in transcription)
3974 #endif /* L_lesf2 */