* config/m68k/lb1sf68.asm: Add PIC macros for Linux targets.
[official-gcc.git] / gcc / config / m68k / lb1sf68.asm
blobe03b7b30d874737118f48c457a46afa1b5e1eae7
1 /* libgcc routines for 68000 w/o floating-point hardware.
2 Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
9 later version.
11 In addition to the permissions in the GNU General Public License, the
12 Free Software Foundation gives you unlimited permission to link the
13 compiled version of this file with other programs, and to distribute
14 those programs without any restriction coming from the use of this
15 file. (The General Public License restrictions do apply in other
16 respects; for example, they cover modification of the file, and
17 distribution when not linked into another program.)
19 This file is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA. */
29 /* As a special exception, if you link this library with files
30 compiled with GCC to produce an executable, this does not cause
31 the resulting executable to be covered by the GNU General Public License.
32 This exception does not however invalidate any other reasons why
33 the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
   The trailing " '" appearing on some lines is for ANSI preprocessors.  Yuk.
   Some of this code comes from MINIX, via the folks at ericsson.
   D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
/* These are predefined by new versions of GNU cpp.  */

/* Prefix prepended to C-visible symbol names (historically "_").  */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif

/* Prefix prepended to register names (empty for MIT syntax).  */
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif

/* Prefix prepended to immediate operands.  */
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.
   SYM(x) expands to the assembler-visible name of the C symbol x.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Note that X is a function.  */

#ifdef __ELF__
/* On ELF, record the symbol type so linkers and debuggers see a
   function rather than untyped data.  */
#define FUNC(x) .type SYM(x),function
#else
/* The .proc pseudo-op is accepted, but ignored, by GAS.  We could just
   define this to the empty string for non-ELF systems, but defining it
   to .proc means that the information is available to the assembler if
   the need arises.  */
#define FUNC(x) .proc
#endif

/* Use the right prefix for registers.  */

#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)

/* Use the right prefix for immediate values.  */

#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
/* Map the bare m68k register names through REG() so the configured
   register prefix is applied uniformly in the code below.  */

#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)
/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5.  PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
#ifndef __PIC__

	/* Non PIC (absolute/relocatable) versions */

	/* Call the subroutine at ADDR (absolute).  */
	.macro PICCALL addr
	jbsr	\addr
	.endm

	/* Jump to ADDR (absolute).  */
	.macro PICJUMP addr
	jmp	\addr
	.endm

	/* Load the address of SYM into REG.  */
	.macro PICLEA sym, reg
	lea	\sym, \reg
	.endm

	/* Push the address of SYM.  AREG is unused here; it is only
	   needed as scratch by the PIC variants below.  */
	.macro PICPEA sym, areg
	pea	\sym
	.endm

#else /* __PIC__ */

	/* Common for Linux and uClinux, the latter with either
	   -mid-shared-library or -msep-data.  */

	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	/* No long PC-relative call on this ISA; compute the
	   PC-relative offset by hand (the -8 accounts for the
	   distance from the lea to the jsr's PC).  */
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	bsr	\addr
#endif
	.endm

	.macro PICJUMP addr
	/* ISA C has no bra.l instruction, and since this assembly file
	   gets assembled into multiple object files, we avoid the
	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr
#endif
	.endm

# if defined (__uClinux__)

	/* Versions for uClinux */

# if defined(__ID_SHARED_LIBRARY__)

	/* -mid-shared-library versions: the GOT pointer is found
	   through the library's slot in the A5-relative table.  */

	.macro PICLEA sym, reg
	movel	a5@(_current_shared_library_a5_offset_), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	a5@(_current_shared_library_a5_offset_), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

# else /* !__ID_SHARED_LIBRARY__ */

	/* Versions for -msep-data: a5 holds the GOT pointer directly.  */

	.macro PICLEA sym, reg
	movel	\sym@GOT(a5), \reg
	.endm

	.macro PICPEA sym, areg
	movel	\sym@GOT(a5), sp@-
	.endm

# endif

# else /* !__uClinux__ */

	/* Versions for Linux: materialize the GOT address PC-relatively
	   (the -6 corrects for the size of the movel), then index it.  */

	.macro PICLEA sym, reg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
	lea	(-6, pc, \reg), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
	lea	(-6, pc, \areg), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

# endif
#endif /* __PIC__ */
208 #ifdef L_floatex
210 | This is an attempt at a decent floating point (single, double and
211 | extended double) code for the GNU C compiler. It should be easy to
212 | adapt to other compilers (but beware of the local labels!).
214 | Starting date: 21 October, 1990
216 | It is convenient to introduce the notation (s,e,f) for a floating point
217 | number, where s=sign, e=exponent, f=fraction. We will call a floating
218 | point number fpn to abbreviate, independently of the precision.
219 | Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
220 | for doubles and 16383 for long doubles). We then have the following
221 | different cases:
222 | 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
223 | (-1)^s x 1.f x 2^(e-bias-1).
224 | 2. Denormalized fpns have e=0. They correspond to numbers of the form
225 | (-1)^s x 0.f x 2^(-bias).
226 | 3. +/-INFINITY have e=MAX_EXP, f=0.
227 | 4. Quiet NaN (Not a Number) have all bits set.
228 | 5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.
230 |=============================================================================
231 | exceptions
232 |=============================================================================
234 | This is the floating point condition code register (_fpCCR):
236 | struct {
237 | short _exception_bits;
238 | short _trap_enable_bits;
239 | short _sticky_bits;
240 | short _rounding_mode;
241 | short _format;
242 | short _last_operation;
243 | union {
244 | float sf;
245 | double df;
246 | } _operand1;
247 | union {
248 | float sf;
249 | double df;
250 | } _operand2;
251 | } _fpCCR;
	.data
	.even

	.globl	SYM (_fpCCR)

| Storage for the software floating-point condition code record
| (layout documented in the struct comment above).
SYM (_fpCCR):
__exception_bits:
	.word	0
__trap_enable_bits:
	.word	0
__sticky_bits:
	.word	0
__rounding_mode:
	.word	ROUND_TO_NEAREST
__format:
	.word	NIL
__last_operation:
	.word	NOOP
__operand1:
	.long	0
	.long	0
__operand2:
	.long	0
	.long	0

| Offsets of each field from the start of _fpCCR (used as a0@(...) displacements):
EBITS = __exception_bits - SYM (_fpCCR)
TRAPE = __trap_enable_bits - SYM (_fpCCR)
STICK = __sticky_bits - SYM (_fpCCR)
ROUND = __rounding_mode - SYM (_fpCCR)
FORMT = __format - SYM (_fpCCR)
LASTO = __last_operation - SYM (_fpCCR)
OPER1 = __operand1 - SYM (_fpCCR)
OPER2 = __operand2 - SYM (_fpCCR)
| The following exception types are supported (bit masks combined into
| the _exception_bits / _sticky_bits / _trap_enable_bits words):
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010

| The allowed rounding modes are (stored in _rounding_mode):
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity

| The allowed values of format are (stored in _format):
NIL = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT = 3

| The allowed values for the last operation are (stored in _last_operation):
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
318 |=============================================================================
319 | __clear_sticky_bits
320 |=============================================================================
322 | The sticky bits are normally not cleared (thus the name), whereas the
323 | exception type and exception value reflect the last computation.
324 | This routine is provided to clear them (you can also write to _fpCCR,
325 | since it is globally visible).
327 .globl SYM (__clear_sticky_bit)
329 .text
330 .even
332 | void __clear_sticky_bits(void);
333 SYM (__clear_sticky_bit):
334 PICLEA SYM (_fpCCR),a0
335 #ifndef __mcoldfire__
336 movew IMM (0),a0@(STICK)
337 #else
338 clr.w a0@(STICK)
339 #endif
342 |=============================================================================
343 | $_exception_handler
344 |=============================================================================
346 .globl $_exception_handler
348 .text
349 .even
351 | This is the common exit point if an exception occurs.
352 | NOTE: it is NOT callable from C!
353 | It expects the exception type in d7, the format (SINGLE_FLOAT,
354 | DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
355 | It sets the corresponding exception and sticky bits, and the format.
356 | Depending on the format if fills the corresponding slots for the
357 | operands which produced the exception (all this information is provided
358 | so if you write your own exception handlers you have enough information
359 | to deal with the problem).
360 | Then checks to see if the corresponding exception is trap-enabled,
361 | in which case it pushes the address of _fpCCR and traps through
362 | trap FPTRAP (15 for the moment).
364 FPTRAP = 15
366 $_exception_handler:
367 PICLEA SYM (_fpCCR),a0
368 movew d7,a0@(EBITS) | set __exception_bits
369 #ifndef __mcoldfire__
370 orw d7,a0@(STICK) | and __sticky_bits
371 #else
372 movew a0@(STICK),d4
373 orl d7,d4
374 movew d4,a0@(STICK)
375 #endif
376 movew d6,a0@(FORMT) | and __format
377 movew d5,a0@(LASTO) | and __last_operation
379 | Now put the operands in place:
380 #ifndef __mcoldfire__
381 cmpw IMM (SINGLE_FLOAT),d6
382 #else
383 cmpl IMM (SINGLE_FLOAT),d6
384 #endif
385 beq 1f
386 movel a6@(8),a0@(OPER1)
387 movel a6@(12),a0@(OPER1+4)
388 movel a6@(16),a0@(OPER2)
389 movel a6@(20),a0@(OPER2+4)
390 bra 2f
391 1: movel a6@(8),a0@(OPER1)
392 movel a6@(12),a0@(OPER2)
394 | And check whether the exception is trap-enabled:
395 #ifndef __mcoldfire__
396 andw a0@(TRAPE),d7 | is exception trap-enabled?
397 #else
398 clrl d6
399 movew a0@(TRAPE),d6
400 andl d6,d7
401 #endif
402 beq 1f | no, exit
403 PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
404 trap IMM (FPTRAP) | and trap
405 #ifndef __mcoldfire__
406 1: moveml sp@+,d2-d7 | restore data registers
407 #else
408 1: moveml sp@,d2-d7
409 | XXX if frame pointer is ever removed, stack pointer must
410 | be adjusted here.
411 #endif
412 unlk a6 | and return
414 #endif /* L_floatex */
#ifdef L_mulsi3
	.text
	FUNC(__mulsi3)
	.globl	SYM (__mulsi3)

| long __mulsi3(long a, long b);
| 32x32 -> low-32 multiply built from three 16-bit muluw partial
| products: (x1*y1)<<0 is folded into the cross terms' shifted sum.
| In:  sp@(4) = a, sp@(8) = b.  Out: d0 = a*b.  Clobbers d1.
SYM (__mulsi3):
	movew	sp@(4), d0	/* x0 -> d0 */
	muluw	sp@(10), d0	/* x0*y1 */
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(8), d1	/* x1*y0 */
#ifndef __mcoldfire__
	addw	d1, d0
#else
	addl	d1, d0
#endif
	swap	d0		| cross terms contribute to the high half
	clrw	d0
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(10), d1	/* x1*y1 */
	addl	d1, d0

	rts			| (was missing: fell through)
#endif /* L_mulsi3 */
#ifdef L_udivsi3
	.text
	FUNC(__udivsi3)
	.globl	SYM (__udivsi3)

| unsigned long __udivsi3(unsigned long dividend, unsigned long divisor);
| In:  sp@(4) = dividend, sp@(8) = divisor.  Out: d0 = quotient.
| Clobbers d1 (and a1 via normal volatility rules); preserves d2.
SYM (__udivsi3):
#ifndef __mcoldfire__
	movel	d2, sp@-
	movel	sp@(12), d1	/* d1 = divisor */
	movel	sp@(8), d0	/* d0 = dividend */

	cmpl	IMM (0x10000), d1 /* divisor >= 2 ^ 16 ?   */
	jcc	L3		/* then try next algorithm */
	movel	d0, d2
	clrw	d2
	swap	d2
	divu	d1, d2		/* high quotient in lower word */
	movew	d2, d0		/* save high quotient */
	swap	d0
	movew	sp@(10), d2	/* get low dividend + high rest */
	divu	d1, d2		/* low quotient */
	movew	d2, d0
	jra	L6

L3:	movel	d1, d2		/* use d2 as divisor backup */
L4:	lsrl	IMM (1), d1	/* shift divisor */
	lsrl	IMM (1), d0	/* shift dividend */
	cmpl	IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ?  */
	jcc	L4
	divu	d1, d0		/* now we have 16-bit divisor */
	andl	IMM (0xffff), d0 /* mask out divisor, ignore remainder */

/* Multiply the 16-bit tentative quotient with the 32-bit divisor.  Because of
   the operand ranges, this might give a 33-bit product.  If this product is
   greater than the dividend, the tentative quotient was too large. */
	movel	d2, d1
	mulu	d0, d1		/* low part, 32 bits */
	swap	d2
	mulu	d0, d2		/* high part, at most 17 bits */
	swap	d2		/* align high part with low part */
	tstw	d2		/* high part 17 bits? */
	jne	L5		/* if 17 bits, quotient was too large */
	addl	d2, d1		/* add parts */
	jcs	L5		/* if sum is 33 bits, quotient was too large */
	cmpl	sp@(8), d1	/* compare the sum with the dividend */
	jls	L6		/* if sum > dividend, quotient was too large */
L5:	subql	IMM (1), d0	/* adjust quotient */

L6:	movel	sp@+, d2
	rts			/* (was missing: fell through) */

#else /* __mcoldfire__ */

/* ColdFire implementation of non-restoring division algorithm from
   Hennessy & Patterson, Appendix A. */
	link	a6,IMM (-12)
	moveml	d2-d4,sp@
	movel	a6@(8),d0	| d0 = dividend
	movel	a6@(12),d1	| d1 = divisor
	clrl	d2		| clear p
	moveq	IMM (31),d4
L1:	addl	d0,d0		| shift reg pair (p,a) one bit left
	addxl	d2,d2
	movl	d2,d3		| subtract b from p, store in tmp.
	subl	d1,d3
	jcs	L2		| if no carry,
	bset	IMM (0),d0	| set the low order bit of a to 1,
	movl	d3,d2		| and store tmp in p.
L2:	subql	IMM (1),d4
	jcc	L1
	moveml	sp@,d2-d4	| restore data registers
	unlk	a6		| and return
	rts			| (was missing: fell through)
#endif /* __mcoldfire__ */

#endif /* L_udivsi3 */
#ifdef L_divsi3
	.text
	FUNC(__divsi3)
	.globl	SYM (__divsi3)

| long __divsi3(long dividend, long divisor);
| Signed divide: negate operands to their absolute values, divide with
| __udivsi3, then restore the result sign tracked in d2.
SYM (__divsi3):
	movel	d2, sp@-

	moveq	IMM (1), d2	/* sign of result stored in d2 (=1 or =-1) */
	movel	sp@(12), d1	/* d1 = divisor */
	jpl	L1
	negl	d1
#ifndef __mcoldfire__
	negb	d2		/* change sign because divisor <0  */
#else
	negl	d2		/* change sign because divisor <0  */
#endif
L1:	movel	sp@(8), d0	/* d0 = dividend */
	jpl	L2
	negl	d0
#ifndef __mcoldfire__
	negb	d2
#else
	negl	d2
#endif

L2:	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3)	/* divide abs(dividend) by abs(divisor) */
	addql	IMM (8), sp

	tstb	d2
	jpl	L3
	negl	d0

L3:	movel	sp@+, d2
	rts			/* (was missing: fell through) */
#endif /* L_divsi3 */
#ifdef L_umodsi3
	.text
	FUNC(__umodsi3)
	.globl	SYM (__umodsi3)

| unsigned long __umodsi3(unsigned long dividend, unsigned long divisor);
| Remainder computed as dividend - (dividend / divisor) * divisor.
SYM (__umodsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0		/* ColdFire has a 32-bit multiply */
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts			/* (was missing: fell through) */
#endif /* L_umodsi3 */
#ifdef L_modsi3
	.text
	FUNC(__modsi3)
	.globl	SYM (__modsi3)

| long __modsi3(long dividend, long divisor);
| Signed remainder computed as dividend - (dividend / divisor) * divisor,
| with the division done by __divsi3.
SYM (__modsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__divsi3)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0		/* ColdFire has a 32-bit multiply */
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts			/* (was missing: fell through) */
#endif /* L_modsi3 */
#ifdef L_double

	.globl	SYM (_fpCCR)
	.globl	$_exception_handler

| Bit pattern returned for invalid operations (a quiet NaN: all bits set).
QUIET_NaN = 0xffffffff

| IEEE double-precision exponent parameters.
D_MAX_EXP = 0x07ff
D_BIAS = 1022
DBL_MAX_EXP = D_MAX_EXP - D_BIAS
DBL_MIN_EXP = 1 - D_BIAS
DBL_MANT_DIG = 53

| Exception bit masks; values must match the L_floatex definitions above.
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010

DOUBLE_FLOAT = 2

| Operation codes; values must match the L_floatex definitions above.
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7

| Rounding modes; values must match the L_floatex definitions above.
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| Entry points: the C-callable double-precision routines provided by
| this section.

	.globl	SYM (__adddf3)
	.globl	SYM (__subdf3)
	.globl	SYM (__muldf3)
	.globl	SYM (__divdf3)
	.globl	SYM (__negdf2)
	.globl	SYM (__cmpdf2)
	.globl	SYM (__cmpdf2_internal)

	.text
	.even
| These are common routines to return and signal exceptions.
| Each loads the exception mask into d7 and the format (DOUBLE_FLOAT)
| into d6, then jumps to $_exception_handler.  The variants that "or"
| d7 into d0 appear to expect the result's sign bit in d7 on entry --
| confirm at the call sites.

Ld$den:
| Return and signal a denormalized number
	orl	d7,d0
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0
	movel	IMM (0),d1
	orl	d7,d0
	movew	IMM (INEXACT_RESULT+OVERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$underflow:
| Return 0 and set the exception flags
	movel	IMM (0),d0
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$inop:
| Return a quiet NaN and set the exception flags
	movel	IMM (QUIET_NaN),d0
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0
	movel	IMM (0),d1
	orl	d7,d0
	movew	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler
699 |=============================================================================
700 |=============================================================================
701 | double precision routines
702 |=============================================================================
703 |=============================================================================
705 | A double precision floating point number (double) has the format:
707 | struct _double {
708 | unsigned int sign : 1; /* sign bit */
709 | unsigned int exponent : 11; /* exponent, shifted by 126 */
710 | unsigned int fraction : 52; /* fraction */
711 | } double;
713 | Thus sizeof(double) = 8 (64 bits).
715 | All the routines are callable from C programs, and return the result
716 | in the register pair d0-d1. They also preserve all registers except
717 | d0-d1 and a0-a1.
719 |=============================================================================
720 | __subdf3
721 |=============================================================================
| double __subdf3(double, double);
| Implemented by flipping the sign of the second operand and falling
| through into __adddf3, so subtraction shares the addition path.
	FUNC(__subdf3)
SYM (__subdf3):
	bchg	IMM (31),sp@(12) | change sign of second operand
				| and fall through, so we always add
728 |=============================================================================
729 | __adddf3
730 |=============================================================================
732 | double __adddf3(double, double);
733 FUNC(__adddf3)
734 SYM (__adddf3):
735 #ifndef __mcoldfire__
736 link a6,IMM (0) | everything will be done in registers
737 moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
738 #else
739 link a6,IMM (-24)
740 moveml d2-d7,sp@
741 #endif
742 movel a6@(8),d0 | get first operand
743 movel a6@(12),d1 |
744 movel a6@(16),d2 | get second operand
745 movel a6@(20),d3 |
747 movel d0,d7 | get d0's sign bit in d7 '
748 addl d1,d1 | check and clear sign bit of a, and gain one
749 addxl d0,d0 | bit of extra precision
750 beq Ladddf$b | if zero return second operand
752 movel d2,d6 | save sign in d6
753 addl d3,d3 | get rid of sign bit and gain one bit of
754 addxl d2,d2 | extra precision
755 beq Ladddf$a | if zero return first operand
757 andl IMM (0x80000000),d7 | isolate a's sign bit '
758 swap d6 | and also b's sign bit '
759 #ifndef __mcoldfire__
760 andw IMM (0x8000),d6 |
761 orw d6,d7 | and combine them into d7, so that a's sign '
762 | bit is in the high word and b's is in the '
763 | low word, so d6 is free to be used
764 #else
765 andl IMM (0x8000),d6
766 orl d6,d7
767 #endif
768 movel d7,a0 | now save d7 into a0, so d7 is free to
769 | be used also
771 | Get the exponents and check for denormalized and/or infinity.
773 movel IMM (0x001fffff),d6 | mask for the fraction
774 movel IMM (0x00200000),d7 | mask to put hidden bit back
776 movel d0,d4 |
777 andl d6,d0 | get fraction in d0
778 notl d6 | make d6 into mask for the exponent
779 andl d6,d4 | get exponent in d4
780 beq Ladddf$a$den | branch if a is denormalized
781 cmpl d6,d4 | check for INFINITY or NaN
782 beq Ladddf$nf |
783 orl d7,d0 | and put hidden bit back
784 Ladddf$1:
785 swap d4 | shift right exponent so that it starts
786 #ifndef __mcoldfire__
787 lsrw IMM (5),d4 | in bit 0 and not bit 20
788 #else
789 lsrl IMM (5),d4 | in bit 0 and not bit 20
790 #endif
791 | Now we have a's exponent in d4 and fraction in d0-d1 '
792 movel d2,d5 | save b to get exponent
793 andl d6,d5 | get exponent in d5
794 beq Ladddf$b$den | branch if b is denormalized
795 cmpl d6,d5 | check for INFINITY or NaN
796 beq Ladddf$nf
797 notl d6 | make d6 into mask for the fraction again
798 andl d6,d2 | and get fraction in d2
799 orl d7,d2 | and put hidden bit back
800 Ladddf$2:
801 swap d5 | shift right exponent so that it starts
802 #ifndef __mcoldfire__
803 lsrw IMM (5),d5 | in bit 0 and not bit 20
804 #else
805 lsrl IMM (5),d5 | in bit 0 and not bit 20
806 #endif
808 | Now we have b's exponent in d5 and fraction in d2-d3. '
810 | The situation now is as follows: the signs are combined in a0, the
811 | numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
812 | and d5 (b). To do the rounding correctly we need to keep all the
813 | bits until the end, so we need to use d0-d1-d2-d3 for the first number
814 | and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
815 | exponents in a2-a3.
817 #ifndef __mcoldfire__
818 moveml a2-a3,sp@- | save the address registers
819 #else
820 movel a2,sp@-
821 movel a3,sp@-
822 movel a4,sp@-
823 #endif
825 movel d4,a2 | save the exponents
826 movel d5,a3 |
828 movel IMM (0),d7 | and move the numbers around
829 movel d7,d6 |
830 movel d3,d5 |
831 movel d2,d4 |
832 movel d7,d3 |
833 movel d7,d2 |
835 | Here we shift the numbers until the exponents are the same, and put
836 | the largest exponent in a2.
837 #ifndef __mcoldfire__
838 exg d4,a2 | get exponents back
839 exg d5,a3 |
840 cmpw d4,d5 | compare the exponents
841 #else
842 movel d4,a4 | get exponents back
843 movel a2,d4
844 movel a4,a2
845 movel d5,a4
846 movel a3,d5
847 movel a4,a3
848 cmpl d4,d5 | compare the exponents
849 #endif
850 beq Ladddf$3 | if equal don't shift '
851 bhi 9f | branch if second exponent is higher
853 | Here we have a's exponent larger than b's, so we have to shift b. We do
854 | this by using as counter d2:
855 1: movew d4,d2 | move largest exponent to d2
856 #ifndef __mcoldfire__
857 subw d5,d2 | and subtract second exponent
858 exg d4,a2 | get back the longs we saved
859 exg d5,a3 |
860 #else
861 subl d5,d2 | and subtract second exponent
862 movel d4,a4 | get back the longs we saved
863 movel a2,d4
864 movel a4,a2
865 movel d5,a4
866 movel a3,d5
867 movel a4,a3
868 #endif
869 | if difference is too large we don't shift (actually, we can just exit) '
870 #ifndef __mcoldfire__
871 cmpw IMM (DBL_MANT_DIG+2),d2
872 #else
873 cmpl IMM (DBL_MANT_DIG+2),d2
874 #endif
875 bge Ladddf$b$small
876 #ifndef __mcoldfire__
877 cmpw IMM (32),d2 | if difference >= 32, shift by longs
878 #else
879 cmpl IMM (32),d2 | if difference >= 32, shift by longs
880 #endif
881 bge 5f
883 #ifndef __mcoldfire__
884 cmpw IMM (16),d2 | if difference >= 16, shift by words
885 #else
886 cmpl IMM (16),d2 | if difference >= 16, shift by words
887 #endif
888 bge 6f
889 bra 3f | enter dbra loop
892 #ifndef __mcoldfire__
893 lsrl IMM (1),d4
894 roxrl IMM (1),d5
895 roxrl IMM (1),d6
896 roxrl IMM (1),d7
897 #else
898 lsrl IMM (1),d7
899 btst IMM (0),d6
900 beq 10f
901 bset IMM (31),d7
902 10: lsrl IMM (1),d6
903 btst IMM (0),d5
904 beq 11f
905 bset IMM (31),d6
906 11: lsrl IMM (1),d5
907 btst IMM (0),d4
908 beq 12f
909 bset IMM (31),d5
910 12: lsrl IMM (1),d4
911 #endif
913 #ifndef __mcoldfire__
914 dbra d2,4b
915 #else
916 subql IMM (1),d2
917 bpl 4b
918 #endif
919 movel IMM (0),d2
920 movel d2,d3
921 bra Ladddf$4
923 movel d6,d7
924 movel d5,d6
925 movel d4,d5
926 movel IMM (0),d4
927 #ifndef __mcoldfire__
928 subw IMM (32),d2
929 #else
930 subl IMM (32),d2
931 #endif
932 bra 2b
934 movew d6,d7
935 swap d7
936 movew d5,d6
937 swap d6
938 movew d4,d5
939 swap d5
940 movew IMM (0),d4
941 swap d4
942 #ifndef __mcoldfire__
943 subw IMM (16),d2
944 #else
945 subl IMM (16),d2
946 #endif
947 bra 3b
950 #ifndef __mcoldfire__
951 exg d4,d5
952 movew d4,d6
953 subw d5,d6 | keep d5 (largest exponent) in d4
954 exg d4,a2
955 exg d5,a3
956 #else
957 movel d5,d6
958 movel d4,d5
959 movel d6,d4
960 subl d5,d6
961 movel d4,a4
962 movel a2,d4
963 movel a4,a2
964 movel d5,a4
965 movel a3,d5
966 movel a4,a3
967 #endif
968 | if difference is too large we don't shift (actually, we can just exit) '
969 #ifndef __mcoldfire__
970 cmpw IMM (DBL_MANT_DIG+2),d6
971 #else
972 cmpl IMM (DBL_MANT_DIG+2),d6
973 #endif
974 bge Ladddf$a$small
975 #ifndef __mcoldfire__
976 cmpw IMM (32),d6 | if difference >= 32, shift by longs
977 #else
978 cmpl IMM (32),d6 | if difference >= 32, shift by longs
979 #endif
980 bge 5f
982 #ifndef __mcoldfire__
983 cmpw IMM (16),d6 | if difference >= 16, shift by words
984 #else
985 cmpl IMM (16),d6 | if difference >= 16, shift by words
986 #endif
987 bge 6f
988 bra 3f | enter dbra loop
991 #ifndef __mcoldfire__
992 lsrl IMM (1),d0
993 roxrl IMM (1),d1
994 roxrl IMM (1),d2
995 roxrl IMM (1),d3
996 #else
997 lsrl IMM (1),d3
998 btst IMM (0),d2
999 beq 10f
1000 bset IMM (31),d3
1001 10: lsrl IMM (1),d2
1002 btst IMM (0),d1
1003 beq 11f
1004 bset IMM (31),d2
1005 11: lsrl IMM (1),d1
1006 btst IMM (0),d0
1007 beq 12f
1008 bset IMM (31),d1
1009 12: lsrl IMM (1),d0
1010 #endif
1012 #ifndef __mcoldfire__
1013 dbra d6,4b
1014 #else
1015 subql IMM (1),d6
1016 bpl 4b
1017 #endif
1018 movel IMM (0),d7
1019 movel d7,d6
1020 bra Ladddf$4
1022 movel d2,d3
1023 movel d1,d2
1024 movel d0,d1
1025 movel IMM (0),d0
1026 #ifndef __mcoldfire__
1027 subw IMM (32),d6
1028 #else
1029 subl IMM (32),d6
1030 #endif
1031 bra 2b
1033 movew d2,d3
1034 swap d3
1035 movew d1,d2
1036 swap d2
1037 movew d0,d1
1038 swap d1
1039 movew IMM (0),d0
1040 swap d0
1041 #ifndef __mcoldfire__
1042 subw IMM (16),d6
1043 #else
1044 subl IMM (16),d6
1045 #endif
1046 bra 3b
1047 Ladddf$3:
1048 #ifndef __mcoldfire__
1049 exg d4,a2
1050 exg d5,a3
1051 #else
1052 movel d4,a4
1053 movel a2,d4
1054 movel a4,a2
1055 movel d5,a4
1056 movel a3,d5
1057 movel a4,a3
1058 #endif
1059 Ladddf$4:
1060 | Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
1061 | the signs in a4.
1063 | Here we have to decide whether to add or subtract the numbers:
1064 #ifndef __mcoldfire__
1065 exg d7,a0 | get the signs
1066 exg d6,a3 | a3 is free to be used
1067 #else
1068 movel d7,a4
1069 movel a0,d7
1070 movel a4,a0
1071 movel d6,a4
1072 movel a3,d6
1073 movel a4,a3
1074 #endif
1075 movel d7,d6 |
1076 movew IMM (0),d7 | get a's sign in d7 '
1077 swap d6 |
1078 movew IMM (0),d6 | and b's sign in d6 '
1079 eorl d7,d6 | compare the signs
1080 bmi Lsubdf$0 | if the signs are different we have
1081 | to subtract
1082 #ifndef __mcoldfire__
1083 exg d7,a0 | else we add the numbers
1084 exg d6,a3 |
1085 #else
1086 movel d7,a4
1087 movel a0,d7
1088 movel a4,a0
1089 movel d6,a4
1090 movel a3,d6
1091 movel a4,a3
1092 #endif
1093 addl d7,d3 |
1094 addxl d6,d2 |
1095 addxl d5,d1 |
1096 addxl d4,d0 |
1098 movel a2,d4 | return exponent to d4
1099 movel a0,d7 |
1100 andl IMM (0x80000000),d7 | d7 now has the sign
1102 #ifndef __mcoldfire__
1103 moveml sp@+,a2-a3
1104 #else
1105 movel sp@+,a4
1106 movel sp@+,a3
1107 movel sp@+,a2
1108 #endif
1110 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1111 | the case of denormalized numbers in the rounding routine itself).
1112 | As in the addition (not in the subtraction!) we could have set
1113 | one more bit we check this:
1114 btst IMM (DBL_MANT_DIG+1),d0
1115 beq 1f
1116 #ifndef __mcoldfire__
1117 lsrl IMM (1),d0
1118 roxrl IMM (1),d1
1119 roxrl IMM (1),d2
1120 roxrl IMM (1),d3
1121 addw IMM (1),d4
1122 #else
1123 lsrl IMM (1),d3
1124 btst IMM (0),d2
1125 beq 10f
1126 bset IMM (31),d3
1127 10: lsrl IMM (1),d2
1128 btst IMM (0),d1
1129 beq 11f
1130 bset IMM (31),d2
1131 11: lsrl IMM (1),d1
1132 btst IMM (0),d0
1133 beq 12f
1134 bset IMM (31),d1
1135 12: lsrl IMM (1),d0
1136 addl IMM (1),d4
1137 #endif
1139 lea pc@(Ladddf$5),a0 | to return from rounding routine
1140 PICLEA SYM (_fpCCR),a1 | check the rounding mode
1141 #ifdef __mcoldfire__
1142 clrl d6
1143 #endif
1144 movew a1@(6),d6 | rounding mode in d6
1145 beq Lround$to$nearest
1146 #ifndef __mcoldfire__
1147 cmpw IMM (ROUND_TO_PLUS),d6
1148 #else
1149 cmpl IMM (ROUND_TO_PLUS),d6
1150 #endif
1151 bhi Lround$to$minus
1152 blt Lround$to$zero
1153 bra Lround$to$plus
1154 Ladddf$5:
1155 | Put back the exponent and check for overflow
1156 #ifndef __mcoldfire__
1157 cmpw IMM (0x7ff),d4 | is the exponent big?
1158 #else
1159 cmpl IMM (0x7ff),d4 | is the exponent big?
1160 #endif
1161 bge 1f
1162 bclr IMM (DBL_MANT_DIG-1),d0
1163 #ifndef __mcoldfire__
1164 lslw IMM (4),d4 | put exponent back into position
1165 #else
1166 lsll IMM (4),d4 | put exponent back into position
1167 #endif
1168 swap d0 |
1169 #ifndef __mcoldfire__
1170 orw d4,d0 |
1171 #else
1172 orl d4,d0 |
1173 #endif
1174 swap d0 |
1175 bra Ladddf$ret
1177 moveq IMM (ADD),d5
1178 bra Ld$overflow
1180 Lsubdf$0:
1181 | Here we do the subtraction.
1182 #ifndef __mcoldfire__
1183 exg d7,a0 | put sign back in a0
1184 exg d6,a3 |
1185 #else
1186 movel d7,a4
1187 movel a0,d7
1188 movel a4,a0
1189 movel d6,a4
1190 movel a3,d6
1191 movel a4,a3
1192 #endif
1193 subl d7,d3 |
1194 subxl d6,d2 |
1195 subxl d5,d1 |
1196 subxl d4,d0 |
1197 beq Ladddf$ret$1 | if zero just exit
1198 bpl 1f | if positive skip the following
1199 movel a0,d7 |
1200 bchg IMM (31),d7 | change sign bit in d7
1201 movel d7,a0 |
1202 negl d3 |
1203 negxl d2 |
1204 negxl d1 | and negate result
1205 negxl d0 |
1207 movel a2,d4 | return exponent to d4
1208 movel a0,d7
1209 andl IMM (0x80000000),d7 | isolate sign bit
1210 #ifndef __mcoldfire__
1211 moveml sp@+,a2-a3 |
1212 #else
1213 movel sp@+,a4
1214 movel sp@+,a3
1215 movel sp@+,a2
1216 #endif
1218 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1219 | the case of denormalized numbers in the rounding routine itself).
1220 | As in the addition (not in the subtraction!) we could have set
1221 | one more bit we check this:
1222 btst IMM (DBL_MANT_DIG+1),d0
1223 beq 1f
1224 #ifndef __mcoldfire__
1225 lsrl IMM (1),d0
1226 roxrl IMM (1),d1
1227 roxrl IMM (1),d2
1228 roxrl IMM (1),d3
1229 addw IMM (1),d4
1230 #else
1231 lsrl IMM (1),d3
1232 btst IMM (0),d2
1233 beq 10f
1234 bset IMM (31),d3
1235 10: lsrl IMM (1),d2
1236 btst IMM (0),d1
1237 beq 11f
1238 bset IMM (31),d2
1239 11: lsrl IMM (1),d1
1240 btst IMM (0),d0
1241 beq 12f
1242 bset IMM (31),d1
1243 12: lsrl IMM (1),d0
1244 addl IMM (1),d4
1245 #endif
1247 lea pc@(Lsubdf$1),a0 | to return from rounding routine
1248 PICLEA SYM (_fpCCR),a1 | check the rounding mode
1249 #ifdef __mcoldfire__
1250 clrl d6
1251 #endif
1252 movew a1@(6),d6 | rounding mode in d6
1253 beq Lround$to$nearest
1254 #ifndef __mcoldfire__
1255 cmpw IMM (ROUND_TO_PLUS),d6
1256 #else
1257 cmpl IMM (ROUND_TO_PLUS),d6
1258 #endif
1259 bhi Lround$to$minus
1260 blt Lround$to$zero
1261 bra Lround$to$plus
1262 Lsubdf$1:
1263 | Put back the exponent and sign (we don't have overflow). '
1264 bclr IMM (DBL_MANT_DIG-1),d0
1265 #ifndef __mcoldfire__
1266 lslw IMM (4),d4 | put exponent back into position
1267 #else
1268 lsll IMM (4),d4 | put exponent back into position
1269 #endif
1270 swap d0 |
1271 #ifndef __mcoldfire__
1272 orw d4,d0 |
1273 #else
1274 orl d4,d0 |
1275 #endif
1276 swap d0 |
1277 bra Ladddf$ret
1279 | If one of the numbers was too small (difference of exponents >=
1280 | DBL_MANT_DIG+1) we return the other (and now we don't have to '
1281 | check for finiteness or zero).
1282 Ladddf$a$small:
1283 #ifndef __mcoldfire__
1284 moveml sp@+,a2-a3
1285 #else
1286 movel sp@+,a4
1287 movel sp@+,a3
1288 movel sp@+,a2
1289 #endif
1290 movel a6@(16),d0
1291 movel a6@(20),d1
1292 PICLEA SYM (_fpCCR),a0
1293 movew IMM (0),a0@
1294 #ifndef __mcoldfire__
1295 moveml sp@+,d2-d7 | restore data registers
1296 #else
1297 moveml sp@,d2-d7
1298 | XXX if frame pointer is ever removed, stack pointer must
1299 | be adjusted here.
1300 #endif
1301 unlk a6 | and return
1304 Ladddf$b$small:
1305 #ifndef __mcoldfire__
1306 moveml sp@+,a2-a3
1307 #else
1308 movel sp@+,a4
1309 movel sp@+,a3
1310 movel sp@+,a2
1311 #endif
1312 movel a6@(8),d0
1313 movel a6@(12),d1
1314 PICLEA SYM (_fpCCR),a0
1315 movew IMM (0),a0@
1316 #ifndef __mcoldfire__
1317 moveml sp@+,d2-d7 | restore data registers
1318 #else
1319 moveml sp@,d2-d7
1320 | XXX if frame pointer is ever removed, stack pointer must
1321 | be adjusted here.
1322 #endif
1323 unlk a6 | and return
1326 Ladddf$a$den:
1327 movel d7,d4 | d7 contains 0x00200000
1328 bra Ladddf$1
1330 Ladddf$b$den:
1331 movel d7,d5 | d7 contains 0x00200000
1332 notl d6
1333 bra Ladddf$2
1335 Ladddf$b:
1336 | Return b (if a is zero)
1337 movel d2,d0
1338 movel d3,d1
1339 bne 1f | Check if b is -0
1340 cmpl IMM (0x80000000),d0
1341 bne 1f
1342 andl IMM (0x80000000),d7 | Use the sign of a
1343 clrl d0
1344 bra Ladddf$ret
1345 Ladddf$a:
1346 movel a6@(8),d0
1347 movel a6@(12),d1
1349 moveq IMM (ADD),d5
1350 | Check for NaN and +/-INFINITY.
1351 movel d0,d7 |
1352 andl IMM (0x80000000),d7 |
1353 bclr IMM (31),d0 |
1354 cmpl IMM (0x7ff00000),d0 |
1355 bge 2f |
1356 movel d0,d0 | check for zero, since we don't '
1357 bne Ladddf$ret | want to return -0 by mistake
1358 bclr IMM (31),d7 |
1359 bra Ladddf$ret |
1361 andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
1362 orl d1,d0 |
1363 bne Ld$inop |
1364 bra Ld$infty |
1366 Ladddf$ret$1:
1367 #ifndef __mcoldfire__
1368 moveml sp@+,a2-a3 | restore regs and exit
1369 #else
1370 movel sp@+,a4
1371 movel sp@+,a3
1372 movel sp@+,a2
1373 #endif
1375 Ladddf$ret:
1376 | Normal exit.
1377 PICLEA SYM (_fpCCR),a0
1378 movew IMM (0),a0@
1379 orl d7,d0 | put sign bit back
1380 #ifndef __mcoldfire__
1381 moveml sp@+,d2-d7
1382 #else
1383 moveml sp@,d2-d7
1384 | XXX if frame pointer is ever removed, stack pointer must
1385 | be adjusted here.
1386 #endif
1387 unlk a6
1390 Ladddf$ret$den:
1391 | Return a denormalized number.
1392 #ifndef __mcoldfire__
1393 lsrl IMM (1),d0 | shift right once more
1394 roxrl IMM (1),d1 |
1395 #else
1396 lsrl IMM (1),d1
1397 btst IMM (0),d0
1398 beq 10f
1399 bset IMM (31),d1
1400 10: lsrl IMM (1),d0
1401 #endif
1402 bra Ladddf$ret
1404 Ladddf$nf:
1405 moveq IMM (ADD),d5
1406 | This could be faster but it is not worth the effort, since it is not
1407 | executed very often. We sacrifice speed for clarity here.
1408 movel a6@(8),d0 | get the numbers back (remember that we
1409 movel a6@(12),d1 | did some processing already)
1410 movel a6@(16),d2 |
1411 movel a6@(20),d3 |
1412 movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
1413 movel d0,d7 | save sign bits
1414 movel d2,d6 |
1415 bclr IMM (31),d0 | clear sign bits
1416 bclr IMM (31),d2 |
1417 | We know that one of them is either NaN of +/-INFINITY
1418 | Check for NaN (if either one is NaN return NaN)
1419 cmpl d4,d0 | check first a (d0)
1420 bhi Ld$inop | if d0 > 0x7ff00000 or equal and
1421 bne 2f
1422 tstl d1 | d1 > 0, a is NaN
1423 bne Ld$inop |
1424 2: cmpl d4,d2 | check now b (d1)
1425 bhi Ld$inop |
1426 bne 3f
1427 tstl d3 |
1428 bne Ld$inop |
1430 | Now comes the check for +/-INFINITY. We know that both are (maybe not
1431 | finite) numbers, but we have to check if both are infinite whether we
1432 | are adding or subtracting them.
1433 eorl d7,d6 | to check sign bits
1434 bmi 1f
1435 andl IMM (0x80000000),d7 | get (common) sign bit
1436 bra Ld$infty
1438 | We know one (or both) are infinite, so we test for equality between the
1439 | two numbers (if they are equal they have to be infinite both, so we
1440 | return NaN).
1441 cmpl d2,d0 | are both infinite?
1442 bne 1f | if d0 <> d2 they are not equal
1443 cmpl d3,d1 | if d0 == d2 test d3 and d1
1444 beq Ld$inop | if equal return NaN
1446 andl IMM (0x80000000),d7 | get a's sign bit '
1447 cmpl d4,d0 | test now for infinity
1448 beq Ld$infty | if a is INFINITY return with this sign
1449 bchg IMM (31),d7 | else we know b is INFINITY and has
1450 bra Ld$infty | the opposite sign
1452 |=============================================================================
1453 | __muldf3
1454 |=============================================================================
1456 | double __muldf3(double, double);
| Software IEEE double multiply.  Operands are on the stack: a at
| a6@(8)-a6@(12), b at a6@(16)-a6@(20); the result comes back in d0-d1.
| Register roles while unpacking: d0-d1 = a, d2-d3 = b, d4/d5 = exponents
| of a/b, d6 = fraction mask (~0x7ff00000), d7 = 0x7ff00000 (+INFINITY),
| a0 = sign of the result.
1457 FUNC(__muldf3)
1458 SYM (__muldf3):
1459 #ifndef __mcoldfire__
1460 link a6,IMM (0) | set up frame (no locals)
1461 moveml d2-d7,sp@- | save the data registers we use
1462 #else
1463 link a6,IMM (-24) | ColdFire: reserve room first, then store
1464 moveml d2-d7,sp@ | (no predecrement moveml on ColdFire)
1465 #endif
1466 movel a6@(8),d0 | get a into d0-d1
1467 movel a6@(12),d1 |
1468 movel a6@(16),d2 | and b into d2-d3
1469 movel a6@(20),d3 |
1470 movel d0,d7 | d7 will hold the sign of the product
1471 eorl d2,d7 | sign(result) = sign(a) XOR sign(b)
1472 andl IMM (0x80000000),d7 |
1473 movel d7,a0 | save sign bit into a0
1474 movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
1475 movel d7,d6 | another (mask for fraction)
1476 notl d6 |
1477 bclr IMM (31),d0 | get rid of a's sign bit '
1478 movel d0,d4 |
1479 orl d1,d4 | test all 64 bits of a for zero
1480 beq Lmuldf$a$0 | branch if a is zero
1481 movel d0,d4 |
1482 bclr IMM (31),d2 | get rid of b's sign bit '
1483 movel d2,d5 |
1484 orl d3,d5 | test all 64 bits of b for zero
1485 beq Lmuldf$b$0 | branch if b is zero
1486 movel d2,d5 |
1487 cmpl d7,d0 | is a big?
1488 bhi Lmuldf$inop | if a is NaN return NaN
1489 beq Lmuldf$a$nf | we still have to check d1 and b ...
1490 cmpl d7,d2 | now compare b with INFINITY
1491 bhi Lmuldf$inop | is b NaN?
1492 beq Lmuldf$b$nf | we still have to check d3 ...
1493 | Here we have both numbers finite and nonzero (and with no sign bit).
1494 | Now we get the exponents into d4 and d5.
1495 andl d7,d4 | isolate exponent in d4
1496 beq Lmuldf$a$den | if exponent zero, have denormalized
1497 andl d6,d0 | isolate fraction
1498 orl IMM (0x00100000),d0 | and put hidden bit back
1499 swap d4 | I like exponents in the first byte
1500 #ifndef __mcoldfire__
1501 lsrw IMM (4),d4 |
1502 #else
1503 lsrl IMM (4),d4 |
1504 #endif
1505 Lmuldf$1: | repeat the unpacking for b
1506 andl d7,d5 |
1507 beq Lmuldf$b$den |
1508 andl d6,d2 |
1509 orl IMM (0x00100000),d2 | and put hidden bit back
1510 swap d5 |
1511 #ifndef __mcoldfire__
1512 lsrw IMM (4),d5 |
1513 #else
1514 lsrl IMM (4),d5 |
1515 #endif
1516 Lmuldf$2: |
1517 #ifndef __mcoldfire__
1518 addw d5,d4 | add exponents
1519 subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
1520 #else
1521 addl d5,d4 | add exponents
1522 subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
1523 #endif
1525 | We are now ready to do the multiplication. The situation is as follows:
1526 | both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
1527 | denormalized to start with!), which means that in the product bit 104
1528 | (which will correspond to bit 8 of the fourth long) is set.
1530 | Here we have to do the product.
1531 | To do it we have to juggle the registers back and forth, as there are not
1532 | enough to keep everything in them. So we use the address registers to keep
1533 | some intermediate data.
1535 #ifndef __mcoldfire__
1536 moveml a2-a3,sp@- | save a2 and a3 for temporary use
1537 #else
1538 movel a2,sp@-
1539 movel a3,sp@-
1540 movel a4,sp@- | a4 is the ColdFire scratch for the exg replacements
1541 #endif
1542 movel IMM (0),a2 | a2 is a null register
1543 movel d4,a3 | and a3 will preserve the exponent
1545 | First, shift d2-d3 so bit 20 becomes bit 31:
1546 #ifndef __mcoldfire__
1547 rorl IMM (5),d2 | rotate d2 5 places right
1548 swap d2 | and swap it
1549 rorl IMM (5),d3 | do the same thing with d3
1550 swap d3 |
1551 movew d3,d6 | get the rightmost 11 bits of d3
1552 andw IMM (0x07ff),d6 |
1553 orw d6,d2 | and put them into d2
1554 andw IMM (0xf800),d3 | clear those bits in d3
1555 #else
1556 moveq IMM (11),d7 | left shift d2 11 bits
1557 lsll d7,d2
1558 movel d3,d6 | get a copy of d3
1559 lsll d7,d3 | left shift d3 11 bits
1560 andl IMM (0xffe00000),d6 | get the top 11 bits of d3
1561 moveq IMM (21),d7 | right shift them 21 bits
1562 lsrl d7,d6
1563 orl d6,d2 | stick them at the end of d2
1564 #endif
1566 movel d2,d6 | move b into d6-d7
1567 movel d3,d7 | move a into d4-d5
1568 movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
1569 movel d1,d5 |
1570 movel IMM (0),d3 |
1571 movel d3,d2 |
1572 movel d3,d1 |
1573 movel d3,d0 |
1575 | We use a1 as counter:
1576 movel IMM (DBL_MANT_DIG-1),a1
1577 #ifndef __mcoldfire__
1578 exg d7,a1
1579 #else
1580 movel d7,a4 | ColdFire has no exg: swap via a4
1581 movel a1,d7
1582 movel a4,a1
1583 #endif
| Shift-and-add multiply loop: each iteration shifts the 128-bit partial
| sum d0-d1-d2-d3 left, shifts the next bit of b (d6-d7) into the carry,
| and adds a (d4-d5) into the sum when that bit was set.
1586 #ifndef __mcoldfire__
1587 exg d7,a1 | put counter back in a1
1588 #else
1589 movel d7,a4
1590 movel a1,d7
1591 movel a4,a1
1592 #endif
1593 addl d3,d3 | shift sum once left
1594 addxl d2,d2 |
1595 addxl d1,d1 |
1596 addxl d0,d0 |
1597 addl d7,d7 | shift b once left, msb goes into carry
1598 addxl d6,d6 |
1599 bcc 2f | if bit clear skip the following
1600 #ifndef __mcoldfire__
1601 exg d7,a2 | free d7 (a2 holds zero)
1602 #else
1603 movel d7,a4
1604 movel a2,d7
1605 movel a4,a2
1606 #endif
1607 addl d5,d3 | else add a to the sum
1608 addxl d4,d2 |
1609 addxl d7,d1 | (d7 is zero here: propagate carries only)
1610 addxl d7,d0 |
1611 #ifndef __mcoldfire__
1612 exg d7,a2 | get d7 back
1613 #else
1614 movel d7,a4
1615 movel a2,d7
1616 movel a4,a2
1617 #endif
1619 #ifndef __mcoldfire__
1620 exg d7,a1 | put counter in d7
1621 dbf d7,1b | decrement and branch
1622 #else
1623 movel d7,a4
1624 movel a1,d7
1625 movel a4,a1
1626 subql IMM (1),d7
1627 bpl 1b
1628 #endif
1630 movel a3,d4 | restore exponent
1631 #ifndef __mcoldfire__
1632 moveml sp@+,a2-a3
1633 #else
1634 movel sp@+,a4
1635 movel sp@+,a3
1636 movel sp@+,a2
1637 #endif
1639 | Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
1640 | first thing to do now is to normalize it so bit 8 becomes bit
1641 | DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
1642 swap d0 | net shift left by 13: word-shift left 16 ...
1643 swap d1
1644 movew d1,d0
1645 swap d2
1646 movew d2,d1
1647 swap d3
1648 movew d3,d2
1649 movew IMM (0),d3
1650 #ifndef __mcoldfire__
1651 lsrl IMM (1),d0 | ... then shift right 3
1652 roxrl IMM (1),d1
1653 roxrl IMM (1),d2
1654 roxrl IMM (1),d3
1655 lsrl IMM (1),d0
1656 roxrl IMM (1),d1
1657 roxrl IMM (1),d2
1658 roxrl IMM (1),d3
1659 lsrl IMM (1),d0
1660 roxrl IMM (1),d1
1661 roxrl IMM (1),d2
1662 roxrl IMM (1),d3
1663 #else
1664 moveq IMM (29),d6 | ColdFire: same 3-bit right shift done
1665 lsrl IMM (3),d3 | longword by longword without roxr
1666 movel d2,d7
1667 lsll d6,d7
1668 orl d7,d3
1669 lsrl IMM (3),d2
1670 movel d1,d7
1671 lsll d6,d7
1672 orl d7,d2
1673 lsrl IMM (3),d1
1674 movel d0,d7
1675 lsll d6,d7
1676 orl d7,d1
1677 lsrl IMM (3),d0
1678 #endif
1680 | Now round, check for over- and underflow, and exit.
1681 movel a0,d7 | get sign bit back into d7
1682 moveq IMM (MULTIPLY),d5
1684 btst IMM (DBL_MANT_DIG+1-32),d0 | did the product get an extra bit?
1685 beq Lround$exit
1686 #ifndef __mcoldfire__
1687 lsrl IMM (1),d0 | yes: shift right once more
1688 roxrl IMM (1),d1 | and bump the exponent
1689 addw IMM (1),d4
1690 #else
1691 lsrl IMM (1),d1
1692 btst IMM (0),d0
1693 beq 10f
1694 bset IMM (31),d1
1695 10: lsrl IMM (1),d0
1696 addl IMM (1),d4
1697 #endif
1698 bra Lround$exit
1700 Lmuldf$inop:
1701 moveq IMM (MULTIPLY),d5
1702 bra Ld$inop | NaN operand: signal invalid operation
1704 Lmuldf$b$nf:
1705 moveq IMM (MULTIPLY),d5
1706 movel a0,d7 | get sign bit back into d7
1707 tstl d3 | we know d2 == 0x7ff00000, so check d3
1708 bne Ld$inop | if d3 <> 0 b is NaN
1709 bra Ld$overflow | else we have overflow (since a is finite)
1711 Lmuldf$a$nf:
1712 moveq IMM (MULTIPLY),d5
1713 movel a0,d7 | get sign bit back into d7
1714 tstl d1 | we know d0 == 0x7ff00000, so check d1
1715 bne Ld$inop | if d1 <> 0 a is NaN
1716 bra Ld$overflow | else signal overflow
1718 | If either number is zero return zero, unless the other is +/-INFINITY or
1719 | NaN, in which case we return NaN.
1720 Lmuldf$b$0:
1721 moveq IMM (MULTIPLY),d5
1722 #ifndef __mcoldfire__
1723 exg d2,d0 | put b (==0) into d0-d1
1724 exg d3,d1 | and a (with sign bit cleared) into d2-d3
1725 movel a0,d0 | set result sign
1726 #else
1727 movel d0,d2 | put a into d2-d3
1728 movel d1,d3
1729 movel a0,d0 | put result zero into d0-d1
1730 movq IMM(0),d1
1731 #endif
1732 bra 1f
1733 Lmuldf$a$0:
1734 movel a0,d0 | set result sign
1735 movel a6@(16),d2 | put b into d2-d3 again
1736 movel a6@(20),d3 |
1737 bclr IMM (31),d2 | clear sign bit
1738 1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
1739 bge Ld$inop | in case NaN or +/-INFINITY return NaN
1740 PICLEA SYM (_fpCCR),a0 | clear exception flags
1741 movew IMM (0),a0@
1742 #ifndef __mcoldfire__
1743 moveml sp@+,d2-d7
1744 #else
1745 moveml sp@,d2-d7
1746 | XXX if frame pointer is ever removed, stack pointer must
1747 | be adjusted here.
1748 #endif
1749 unlk a6
1752 | If a number is denormalized we put an exponent of 1 but do not put the
1753 | hidden bit back into the fraction; instead we shift left until bit 21
1754 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
1755 | to ensure that the product of the fractions is close to 1.
1756 Lmuldf$a$den:
1757 movel IMM (1),d4
1758 andl d6,d0
1759 1: addl d1,d1 | shift a left until bit 20 is set
1760 addxl d0,d0 |
1761 #ifndef __mcoldfire__
1762 subw IMM (1),d4 | and adjust exponent
1763 #else
1764 subl IMM (1),d4 | and adjust exponent
1765 #endif
1766 btst IMM (20),d0 |
1767 bne Lmuldf$1 | hidden bit set: go unpack b
1768 bra 1b
1770 Lmuldf$b$den:
1771 movel IMM (1),d5
1772 andl d6,d2
1773 1: addl d3,d3 | shift b left until bit 20 is set
1774 addxl d2,d2 |
1775 #ifndef __mcoldfire__
1776 subw IMM (1),d5 | and adjust exponent
1777 #else
1778 subql IMM (1),d5 | and adjust exponent
1779 #endif
1780 btst IMM (20),d2 |
1781 bne Lmuldf$2 | hidden bit set: continue
1782 bra 1b
1785 |=============================================================================
1786 | __divdf3
1787 |=============================================================================
1789 | double __divdf3(double, double);
| Software IEEE double divide (a / b).  Operands are on the stack: a at
| a6@(8)-a6@(12), b at a6@(16)-a6@(20); the result comes back in d0-d1.
| Register roles while unpacking mirror __muldf3: d0-d1 = a, d2-d3 = b,
| d4/d5 = exponents, d6 = fraction mask, d7 = 0x7ff00000, a0 = result sign.
1790 FUNC(__divdf3)
1791 SYM (__divdf3):
1792 #ifndef __mcoldfire__
1793 link a6,IMM (0)
1794 moveml d2-d7,sp@-
1795 #else
1796 link a6,IMM (-24)
1797 moveml d2-d7,sp@
1798 #endif
1799 movel a6@(8),d0 | get a into d0-d1
1800 movel a6@(12),d1 |
1801 movel a6@(16),d2 | and b into d2-d3
1802 movel a6@(20),d3 |
1803 movel d0,d7 | d7 will hold the sign of the result
1804 eorl d2,d7 | sign(result) = sign(a) XOR sign(b)
1805 andl IMM (0x80000000),d7
1806 movel d7,a0 | save sign into a0
1807 movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
1808 movel d7,d6 | another (mask for fraction)
1809 notl d6 |
1810 bclr IMM (31),d0 | get rid of a's sign bit '
1811 movel d0,d4 |
1812 orl d1,d4 | test all 64 bits of a for zero
1813 beq Ldivdf$a$0 | branch if a is zero
1814 movel d0,d4 |
1815 bclr IMM (31),d2 | get rid of b's sign bit '
1816 movel d2,d5 |
1817 orl d3,d5 | test all 64 bits of b for zero
1818 beq Ldivdf$b$0 | branch if b is zero
1819 movel d2,d5
1820 cmpl d7,d0 | is a big?
1821 bhi Ldivdf$inop | if a is NaN return NaN
1822 beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
1823 cmpl d7,d2 | now compare b with INFINITY
1824 bhi Ldivdf$inop | if b is NaN return NaN
1825 beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
1826 | Here we have both numbers finite and nonzero (and with no sign bit).
1827 | Now we get the exponents into d4 and d5 and normalize the numbers to
1828 | ensure that the ratio of the fractions is around 1. We do this by
1829 | making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
1830 | set, even if they were denormalized to start with.
1831 | Thus, the result will satisfy: 2 > result > 1/2.
1832 andl d7,d4 | and isolate exponent in d4
1833 beq Ldivdf$a$den | if exponent is zero we have a denormalized
1834 andl d6,d0 | and isolate fraction
1835 orl IMM (0x00100000),d0 | and put hidden bit back
1836 swap d4 | I like exponents in the first byte
1837 #ifndef __mcoldfire__
1838 lsrw IMM (4),d4 |
1839 #else
1840 lsrl IMM (4),d4 |
1841 #endif
1842 Ldivdf$1: | repeat the unpacking for b
1843 andl d7,d5 |
1844 beq Ldivdf$b$den |
1845 andl d6,d2 |
1846 orl IMM (0x00100000),d2
1847 swap d5 |
1848 #ifndef __mcoldfire__
1849 lsrw IMM (4),d5 |
1850 #else
1851 lsrl IMM (4),d5 |
1852 #endif
1853 Ldivdf$2: |
1854 #ifndef __mcoldfire__
1855 subw d5,d4 | subtract exponents
1856 addw IMM (D_BIAS),d4 | and add bias
1857 #else
1858 subl d5,d4 | subtract exponents
1859 addl IMM (D_BIAS),d4 | and add bias
1860 #endif
1862 | We are now ready to do the division. We have prepared things in such a way
1863 | that the ratio of the fractions will be less than 2 but greater than 1/2.
1864 | At this point the registers in use are:
1865 | d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
1866 | DBL_MANT_DIG-1-32=1)
1867 | d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
1868 | d4 holds the difference of the exponents, corrected by the bias
1869 | a0 holds the sign of the ratio
1871 | To do the rounding correctly we need to keep information about the
1872 | nonsignificant bits. One way to do this would be to do the division
1873 | using four registers; another is to use two registers (as originally
1874 | I did), but use a sticky bit to preserve information about the
1875 | fractional part. Note that we can keep that info in a1, which is not
1876 | used.
1877 movel IMM (0),d6 | d6-d7 will hold the result
1878 movel d6,d7 |
1879 movel IMM (0),a1 | and a1 will hold the sticky bit
1881 movel IMM (DBL_MANT_DIG-32+1),d5 | bit counter for the first long
1883 1: cmpl d0,d2 | is a < b?
1884 bhi 3f | if b > a skip the following
1885 beq 4f | if d0==d2 check d1 and d3
1886 2: subl d3,d1 |
1887 subxl d2,d0 | a <-- a - b
1888 bset d5,d6 | set the corresponding bit in d6
1889 3: addl d1,d1 | shift a by 1
1890 addxl d0,d0 |
1891 #ifndef __mcoldfire__
1892 dbra d5,1b | and branch back
1893 #else
1894 subql IMM (1), d5
1895 bpl 1b
1896 #endif
1897 bra 5f
1898 4: cmpl d1,d3 | here d0==d2, so check d1 and d3
1899 bhi 3b | if d3 > d1 skip the subtraction
1900 bra 2b | else go do it
1902 | Here we have to start setting the bits in the second long.
1903 movel IMM (31),d5 | again d5 is counter
1905 1: cmpl d0,d2 | is a < b?
1906 bhi 3f | if b > a skip the following
1907 beq 4f | if d0==d2 check d1 and d3
1908 2: subl d3,d1 |
1909 subxl d2,d0 | a <-- a - b
1910 bset d5,d7 | set the corresponding bit in d7
1911 3: addl d1,d1 | shift a by 1
1912 addxl d0,d0 |
1913 #ifndef __mcoldfire__
1914 dbra d5,1b | and branch back
1915 #else
1916 subql IMM (1), d5
1917 bpl 1b
1918 #endif
1919 bra 5f
1920 4: cmpl d1,d3 | here d0==d2, so check d1 and d3
1921 bhi 3b | if d3 > d1 skip the subtraction
1922 bra 2b | else go do it
1924 | Now go ahead checking until we hit a one, which we store in d2.
1925 movel IMM (DBL_MANT_DIG),d5
1926 1: cmpl d2,d0 | is a < b?
1927 bhi 4f | if b < a, exit
1928 beq 3f | if d0==d2 check d1 and d3
1929 2: addl d1,d1 | shift a by 1
1930 addxl d0,d0 |
1931 #ifndef __mcoldfire__
1932 dbra d5,1b | and branch back
1933 #else
1934 subql IMM (1), d5
1935 bpl 1b
1936 #endif
1937 movel IMM (0),d2 | here no sticky bit was found
1938 movel d2,d3
1939 bra 5f
1940 3: cmpl d1,d3 | here d0==d2, so check d1 and d3
1941 bhi 2b | if d3 > d1 go back
1943 | Here put the sticky bit in d2-d3 (in the position which actually corresponds
1944 | to it; if you don't do this the algorithm loses in some cases). '
1945 movel IMM (0),d2
1946 movel d2,d3
1947 #ifndef __mcoldfire__
1948 subw IMM (DBL_MANT_DIG),d5
1949 addw IMM (63),d5
1950 cmpw IMM (31),d5
1951 #else
1952 subl IMM (DBL_MANT_DIG),d5
1953 addl IMM (63),d5
1954 cmpl IMM (31),d5
1955 #endif
1956 bhi 2f
| NOTE(review): the branch above jumps straight to the bset at 2:, skipping
| the IMM (32) adjustment below; bset with a data register destination uses
| the bit number modulo 32, so the bit still lands in the right place —
| confirm intent.
1957 1: bset d5,d3
1958 bra 5f
1959 #ifndef __mcoldfire__
1960 subw IMM (32),d5
1961 #else
1962 subl IMM (32),d5
1963 #endif
1964 2: bset d5,d2
1966 | Finally we are finished! Move the longs in the address registers to
1967 | their final destination:
1968 movel d6,d0
1969 movel d7,d1
1970 movel IMM (0),d3
1972 | Here we have finished the division, with the result in d0-d1-d2-d3, with
1973 | 2^21 <= d0 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
1974 | If it is not, then definitely bit 21 is set. Normalize so bit 22 is
1975 | not set:
1976 btst IMM (DBL_MANT_DIG-32+1),d0
1977 beq 1f
1978 #ifndef __mcoldfire__
1979 lsrl IMM (1),d0
1980 roxrl IMM (1),d1
1981 roxrl IMM (1),d2
1982 roxrl IMM (1),d3
1983 addw IMM (1),d4
1984 #else
1985 lsrl IMM (1),d3 | ColdFire: 128-bit right shift done
1986 btst IMM (0),d2 | longword by longword without roxr
1987 beq 10f
1988 bset IMM (31),d3
1989 10: lsrl IMM (1),d2
1990 btst IMM (0),d1
1991 beq 11f
1992 bset IMM (31),d2
1993 11: lsrl IMM (1),d1
1994 btst IMM (0),d0
1995 beq 12f
1996 bset IMM (31),d1
1997 12: lsrl IMM (1),d0
1998 addl IMM (1),d4
1999 #endif
2001 | Now round, check for over- and underflow, and exit.
2002 movel a0,d7 | restore sign bit to d7
2003 moveq IMM (DIVIDE),d5
2004 bra Lround$exit
2006 Ldivdf$inop:
2007 moveq IMM (DIVIDE),d5
2008 bra Ld$inop
2010 Ldivdf$a$0:
2011 | If a is zero check to see whether b is zero also. In that case return
2012 | NaN; then check if b is NaN, and return NaN also in that case. Else
2013 | return a properly signed zero.
2014 moveq IMM (DIVIDE),d5
2015 bclr IMM (31),d2 |
2016 movel d2,d4 |
2017 orl d3,d4 |
2018 beq Ld$inop | if b is also zero return NaN
2019 cmpl IMM (0x7ff00000),d2 | check for NaN
2020 bhi Ld$inop |
2021 blt 1f |
2022 tstl d3 |
2023 bne Ld$inop |
2024 1: movel a0,d0 | else return signed zero
2025 moveq IMM(0),d1 |
2026 PICLEA SYM (_fpCCR),a0 | clear exception flags
2027 movew IMM (0),a0@ |
2028 #ifndef __mcoldfire__
2029 moveml sp@+,d2-d7 |
2030 #else
2031 moveml sp@,d2-d7 |
2032 | XXX if frame pointer is ever removed, stack pointer must
2033 | be adjusted here.
2034 #endif
2035 unlk a6 |
2036 rts |
2038 Ldivdf$b$0:
2039 moveq IMM (DIVIDE),d5
2040 | If we got here a is not zero. Check if a is NaN; in that case return NaN,
2041 | else return +/-INFINITY. Remember that a is in d0 with the sign bit
2042 | cleared already.
2043 movel a0,d7 | put a's sign bit back in d7 '
2044 cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
2045 bhi Ld$inop | if larger it is NaN
2046 tstl d1 |
2047 bne Ld$inop |
2048 bra Ld$div$0 | else signal DIVIDE_BY_ZERO
2050 Ldivdf$b$nf:
2051 moveq IMM (DIVIDE),d5
2052 | If d2 == 0x7ff00000 we have to check d3.
2053 tstl d3 |
2054 bne Ld$inop | if d3 <> 0, b is NaN
2055 bra Ld$underflow | else b is +/-INFINITY, so signal underflow
2057 Ldivdf$a$nf:
2058 moveq IMM (DIVIDE),d5
2059 | If d0 == 0x7ff00000 we have to check d1.
2060 tstl d1 |
2061 bne Ld$inop | if d1 <> 0, a is NaN
2062 | If a is INFINITY we have to check b
2063 cmpl d7,d2 | compare b with INFINITY
2064 bge Ld$inop | if b is NaN or INFINITY return NaN
2065 tstl d3 |
2066 bne Ld$inop |
2067 bra Ld$overflow | else return overflow
2069 | If a number is denormalized we put an exponent of 1 but do not put the
2070 | bit back into the fraction.
2071 Ldivdf$a$den:
2072 movel IMM (1),d4
2073 andl d6,d0
2074 1: addl d1,d1 | shift a left until bit 20 is set
2075 addxl d0,d0
2076 #ifndef __mcoldfire__
2077 subw IMM (1),d4 | and adjust exponent
2078 #else
2079 subl IMM (1),d4 | and adjust exponent
2080 #endif
2081 btst IMM (DBL_MANT_DIG-32-1),d0
2082 bne Ldivdf$1 | hidden bit set: go unpack b
2083 bra 1b
2085 Ldivdf$b$den:
2086 movel IMM (1),d5
2087 andl d6,d2
2088 1: addl d3,d3 | shift b left until bit 20 is set
2089 addxl d2,d2
2090 #ifndef __mcoldfire__
2091 subw IMM (1),d5 | and adjust exponent
2092 #else
2093 subql IMM (1),d5 | and adjust exponent
2094 #endif
2095 btst IMM (DBL_MANT_DIG-32-1),d2
2096 bne Ldivdf$2 | hidden bit set: continue
2097 bra 1b
2099 Lround$exit:
2100 | This is a common exit point for __muldf3 and __divdf3. When they enter
2101 | this point the sign of the result is in d7, the result in d0-d1, normalized
2102 | so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| Extra (guard/sticky) bits shifted out of the fraction travel in d2-d3;
| d5 holds the operation code (MULTIPLY or DIVIDE) for the exception paths.
2104 | First check for underflow in the exponent:
2105 #ifndef __mcoldfire__
2106 cmpw IMM (-DBL_MANT_DIG-1),d4
2107 #else
2108 cmpl IMM (-DBL_MANT_DIG-1),d4
2109 #endif
2110 blt Ld$underflow
2111 | It could happen that the exponent is less than 1, in which case the
2112 | number is denormalized. In this case we shift right and adjust the
2113 | exponent until it becomes 1 or the fraction is zero (in the latter case
2114 | we signal underflow and return zero).
2115 movel d7,a0 | save the sign: d7 is needed as scratch below
2116 movel IMM (0),d6 | use d6-d7 to collect bits flushed right
2117 movel d6,d7 | use d6-d7 to collect bits flushed right
2118 #ifndef __mcoldfire__
2119 cmpw IMM (1),d4 | if the exponent is less than 1 we
2120 #else
2121 cmpl IMM (1),d4 | if the exponent is less than 1 we
2122 #endif
2123 bge 2f | have to shift right (denormalize)
2125 #ifndef __mcoldfire__
2126 addw IMM (1),d4 | adjust the exponent
2127 lsrl IMM (1),d0 | shift right once
2128 roxrl IMM (1),d1 |
2129 roxrl IMM (1),d2 |
2130 roxrl IMM (1),d3 |
2131 roxrl IMM (1),d6 |
2132 roxrl IMM (1),d7 |
2133 cmpw IMM (1),d4 | is the exponent 1 already?
2134 #else
2135 addl IMM (1),d4 | adjust the exponent
2136 lsrl IMM (1),d7 | 192-bit right shift done longword by
2137 btst IMM (0),d6 | longword: the low bit of each register
2138 beq 13f | becomes the high bit of the next one
2139 bset IMM (31),d7
2140 13: lsrl IMM (1),d6
2141 btst IMM (0),d3
2142 beq 14f
2143 bset IMM (31),d6
2144 14: lsrl IMM (1),d3
2145 btst IMM (0),d2
2146 beq 10f
2147 bset IMM (31),d3
2148 10: lsrl IMM (1),d2
2149 btst IMM (0),d1
2150 beq 11f
2151 bset IMM (31),d2
2152 11: lsrl IMM (1),d1
2153 btst IMM (0),d0
2154 beq 12f
2155 bset IMM (31),d1
2156 12: lsrl IMM (1),d0
2157 cmpl IMM (1),d4 | is the exponent 1 already?
2158 #endif
2159 beq 2f | yes: stop denormalizing
2160 bra 1b | no: shift right once more
2161 bra Ld$underflow | safety check, shouldn't execute '
2162 2: orl d6,d2 | this is a trick so we don't lose '
2163 orl d7,d3 | the bits which were flushed right
2164 movel a0,d7 | get back sign bit into d7
2165 | Now call the rounding routine (which takes care of denormalized numbers):
2166 lea pc@(Lround$0),a0 | to return from rounding routine
2167 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2168 #ifdef __mcoldfire__
2169 clrl d6
2170 #endif
2171 movew a1@(6),d6 | rounding mode in d6
2172 beq Lround$to$nearest
2173 #ifndef __mcoldfire__
2174 cmpw IMM (ROUND_TO_PLUS),d6
2175 #else
2176 cmpl IMM (ROUND_TO_PLUS),d6
2177 #endif
2178 bhi Lround$to$minus
2179 blt Lround$to$zero
2180 bra Lround$to$plus
2181 Lround$0:
2182 | Here we have a correctly rounded result (either normalized or denormalized).
2184 | Here we should have either a normalized number or a denormalized one, and
2185 | the exponent is necessarily larger or equal to 1 (so we don't have to '
2186 | check again for underflow!). We have to check for overflow or for a
2187 | denormalized number (which also signals underflow).
2188 | Check for overflow (i.e., exponent >= 0x7ff).
2189 #ifndef __mcoldfire__
2190 cmpw IMM (0x07ff),d4
2191 #else
2192 cmpl IMM (0x07ff),d4
2193 #endif
2194 bge Ld$overflow
2195 | Now check for a denormalized number (exponent==0):
2196 movew d4,d4 | move-to-self just tests the low word
2197 beq Ld$den
2199 | Put back the exponents and sign and return.
2200 #ifndef __mcoldfire__
2201 lslw IMM (4),d4 | exponent back to fourth byte
2202 #else
2203 lsll IMM (4),d4 | exponent back to fourth byte
2204 #endif
2205 bclr IMM (DBL_MANT_DIG-32-1),d0 | clear the hidden bit
2206 swap d0 | and put back exponent
2207 #ifndef __mcoldfire__
2208 orw d4,d0 |
2209 #else
2210 orl d4,d0 |
2211 #endif
2212 swap d0 |
2213 orl d7,d0 | and sign also
2215 PICLEA SYM (_fpCCR),a0 | clear exception flags
2216 movew IMM (0),a0@
2217 #ifndef __mcoldfire__
2218 moveml sp@+,d2-d7
2219 #else
2220 moveml sp@,d2-d7
2221 | XXX if frame pointer is ever removed, stack pointer must
2222 | be adjusted here.
2223 #endif
2224 unlk a6
2227 |=============================================================================
2228 | __negdf2
2229 |=============================================================================
2231 | double __negdf2(double, double);
| Negate an IEEE double.  The operand is at a6@(8)-a6@(12); the result
| comes back in d0-d1.  NaN operands go to the invalid-operation handler,
| zeroes come back as +0.0, infinities keep the flipped sign.
2232 FUNC(__negdf2)
2233 SYM (__negdf2):
2234 #ifndef __mcoldfire__
2235 link a6,IMM (0)
2236 moveml d2-d7,sp@-
2237 #else
2238 link a6,IMM (-24)
2239 moveml d2-d7,sp@
2240 #endif
2241 moveq IMM (NEGATE),d5 | operation code for the exception paths
2242 movel a6@(8),d0 | get number to negate in d0-d1
2243 movel a6@(12),d1 |
2244 bchg IMM (31),d0 | negate
2245 movel d0,d2 | make a positive copy (for the tests)
2246 bclr IMM (31),d2 |
2247 movel d2,d4 | check for zero
2248 orl d1,d4 |
2249 beq 2f | if zero (either sign) return +zero
2250 cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
2251 blt 1f | if finite, return
2252 bhi Ld$inop | if larger (fraction not zero) is NaN
2253 tstl d1 | if d2 == 0x7ff00000 check d1
2254 bne Ld$inop |
2255 movel d0,d7 | else get sign and return INFINITY
2256 andl IMM (0x80000000),d7
2257 bra Ld$infty
2258 1: PICLEA SYM (_fpCCR),a0 | clear exception flags
2259 movew IMM (0),a0@
2260 #ifndef __mcoldfire__
2261 moveml sp@+,d2-d7
2262 #else
2263 moveml sp@,d2-d7
2264 | XXX if frame pointer is ever removed, stack pointer must
2265 | be adjusted here.
2266 #endif
2267 unlk a6
2269 2: bclr IMM (31),d0 | zero: force the sign bit off (+0.0)
2270 bra 1b
2272 |=============================================================================
2273 | __cmpdf2
2274 |=============================================================================
2276 GREATER = 1
2277 LESS = -1
2278 EQUAL = 0
2280 | int __cmpdf2_internal(double, double, int);
| Compare two IEEE doubles.  Returns GREATER/LESS/EQUAL in d0; when either
| operand is a NaN the third argument (at a6@(24)) is loaded into d0 and
| control passes to the exception handler.
2281 SYM (__cmpdf2_internal):
2282 #ifndef __mcoldfire__
2283 link a6,IMM (0)
2284 moveml d2-d7,sp@- | save registers
2285 #else
2286 link a6,IMM (-24)
2287 moveml d2-d7,sp@
2288 #endif
2289 moveq IMM (COMPARE),d5
2290 movel a6@(8),d0 | get first operand
2291 movel a6@(12),d1 |
2292 movel a6@(16),d2 | get second operand
2293 movel a6@(20),d3 |
2294 | First check if a and/or b are (+/-) zero and in that case clear
2295 | the sign bit.
2296 movel d0,d6 | copy signs into d6 (a) and d7(b)
2297 bclr IMM (31),d0 | and clear signs in d0 and d2
2298 movel d2,d7 |
2299 bclr IMM (31),d2 |
2300 cmpl IMM (0x7ff00000),d0 | check for a == NaN
2301 bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
2302 beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
2303 movel d0,d4 | copy into d4 to test for zero
2304 orl d1,d4 |
2305 beq Lcmpdf$a$0 |
2306 Lcmpdf$0:
2307 cmpl IMM (0x7ff00000),d2 | check for b == NaN
2308 bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
2309 beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
2310 movel d2,d4 |
2311 orl d3,d4 |
2312 beq Lcmpdf$b$0 |
2313 Lcmpdf$1:
2314 | Check the signs
2315 eorl d6,d7 | msb of d7 now set iff the signs differ
2316 bpl 1f
2317 | If the signs are not equal check if a >= 0
2318 tstl d6
2319 bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
2320 bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
2322 | If the signs are equal check for < 0
2323 tstl d6
2324 bpl 1f
2325 | If both are negative exchange them
2326 #ifndef __mcoldfire__
2327 exg d0,d2
2328 exg d1,d3
2329 #else
2330 movel d0,d7 | swap via d7 (no exg on ColdFire)
2331 movel d2,d0
2332 movel d7,d2
2333 movel d1,d7
2334 movel d3,d1
2335 movel d7,d3
2336 #endif
2338 | Now that they are positive we just compare them as longs (does this also
2339 | work for denormalized numbers?).
2340 cmpl d0,d2
2341 bhi Lcmpdf$b$gt$a | |b| > |a|
2342 bne Lcmpdf$a$gt$b | |b| < |a|
2343 | If we got here d0 == d2, so we compare d1 and d3.
2344 cmpl d1,d3
2345 bhi Lcmpdf$b$gt$a | |b| > |a|
2346 bne Lcmpdf$a$gt$b | |b| < |a|
2347 | If we got here a == b.
2348 movel IMM (EQUAL),d0
2349 #ifndef __mcoldfire__
2350 moveml sp@+,d2-d7 | put back the registers
2351 #else
2352 moveml sp@,d2-d7
2353 | XXX if frame pointer is ever removed, stack pointer must
2354 | be adjusted here.
2355 #endif
2356 unlk a6
2358 Lcmpdf$a$gt$b:
2359 movel IMM (GREATER),d0
2360 #ifndef __mcoldfire__
2361 moveml sp@+,d2-d7 | put back the registers
2362 #else
2363 moveml sp@,d2-d7
2364 | XXX if frame pointer is ever removed, stack pointer must
2365 | be adjusted here.
2366 #endif
2367 unlk a6
2369 Lcmpdf$b$gt$a:
2370 movel IMM (LESS),d0
2371 #ifndef __mcoldfire__
2372 moveml sp@+,d2-d7 | put back the registers
2373 #else
2374 moveml sp@,d2-d7
2375 | XXX if frame pointer is ever removed, stack pointer must
2376 | be adjusted here.
2377 #endif
2378 unlk a6
2381 Lcmpdf$a$0:
2382 bclr IMM (31),d6 | a == 0: treat its sign as positive
2383 bra Lcmpdf$0
2384 Lcmpdf$b$0:
2385 bclr IMM (31),d7 | b == 0: treat its sign as positive
2386 bra Lcmpdf$1
2388 Lcmpdf$a$nf:
2389 tstl d1 | d0 == 0x7ff00000: nonzero d1 means a is NaN
2390 bne Ld$inop
2391 bra Lcmpdf$0
2393 Lcmpdf$b$nf:
2394 tstl d3 | d2 == 0x7ff00000: nonzero d3 means b is NaN
2395 bne Ld$inop
2396 bra Lcmpdf$1
2398 Lcmpd$inop:
2399 movl a6@(24),d0 | result for NaN comes from the third argument
2400 moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
2401 moveq IMM (DOUBLE_FLOAT),d6
2402 PICJUMP $_exception_handler
| __cmpdf2: public entry point. Re-pushes both doubles plus the literal 1
| (the value __cmpdf2_internal returns when an operand is a NaN) and calls
| the internal routine.
| NOTE(review): the fused line numbers jump from 2414 to 2417 here -- the
| upstream `rts` after `unlk a6` appears to be missing from this copy.
2404 | int __cmpdf2(double, double);
2405 FUNC(__cmpdf2)
2406 SYM (__cmpdf2):
2407 link a6,IMM (0)
2408 pea 1
2409 movl a6@(20),sp@-
2410 movl a6@(16),sp@-
2411 movl a6@(12),sp@-
2412 movl a6@(8),sp@-
2413 bsr SYM (__cmpdf2_internal)
2414 unlk a6
2417 |=============================================================================
2418 | rounding routines
2419 |=============================================================================
2421 | The rounding routines expect the number to be normalized in registers
2422 | d0-d1-d2-d3, with the exponent in register d4. They assume that the
2423 | exponent is larger or equal to 1. They return a properly normalized number
2424 | if possible, and a denormalized number otherwise. The exponent is returned
2425 | in d4.
| Callers load their continuation address into a0; every exit is `jmp a0@`.
| Only round-to-nearest does real work; the other three modes below return
| the fraction unmodified.
| NOTE(review): the fused line numbers are discontinuous (2453 -> 2455,
| 2474 -> 2476); upstream has label-only lines (`2:`) on the missing lines,
| which the branches `bne 2f`/`beq 2f`/`bra 2f` here still target -- this
| copy will not assemble until those labels are restored.
2427 Lround$to$nearest:
2428 | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
2429 | Here we assume that the exponent is not too small (this should be checked
2430 | before entering the rounding routine), but the number could be denormalized.
2432 | Check for denormalized numbers:
2433 1: btst IMM (DBL_MANT_DIG-32),d0
2434 bne 2f | if set the number is normalized
2435 | Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
2436 | is one (remember that a denormalized number corresponds to an
2437 | exponent of -D_BIAS+1).
2438 #ifndef __mcoldfire__
2439 cmpw IMM (1),d4 | remember that the exponent is at least one
2440 #else
2441 cmpl IMM (1),d4 | remember that the exponent is at least one
2442 #endif
2443 beq 2f | an exponent of one means denormalized
2444 addl d3,d3 | else shift and adjust the exponent
2445 addxl d2,d2 |
2446 addxl d1,d1 |
2447 addxl d0,d0 |
2448 #ifndef __mcoldfire__
2449 dbra d4,1b |
2450 #else
2451 subql IMM (1), d4
2452 bpl 1b
2453 #endif
2455 | Now round: we do it as follows: after the shifting we can write the
2456 | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
2457 | If delta < 1, do nothing. If delta > 1, add 1 to f.
2458 | If delta == 1, we make sure the rounded number will be even (odd?)
2459 | (after shifting).
2460 btst IMM (0),d1 | is delta < 1?
2461 beq 2f | if so, do not do anything
2462 orl d2,d3 | is delta == 1?
2463 bne 1f | if so round to even
2464 movel d1,d3 |
2465 andl IMM (2),d3 | bit 1 is the last significant bit
2466 movel IMM (0),d2 |
2467 addl d3,d1 |
2468 addxl d2,d0 |
2469 bra 2f |
2470 1: movel IMM (1),d3 | else add 1
2471 movel IMM (0),d2 |
2472 addl d3,d1 |
2473 addxl d2,d0
2474 | Shift right once (because we used bit #DBL_MANT_DIG-32!).
2476 #ifndef __mcoldfire__
2477 lsrl IMM (1),d0
2478 roxrl IMM (1),d1
2479 #else
| ColdFire has no roxrl: propagate d0 bit 0 into d1 bit 31 by hand.
2480 lsrl IMM (1),d1
2481 btst IMM (0),d0
2482 beq 10f
2483 bset IMM (31),d1
2484 10: lsrl IMM (1),d0
2485 #endif
2487 | Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
2488 | 'fraction overflow' ...).
2489 btst IMM (DBL_MANT_DIG-32),d0
2490 beq 1f
2491 #ifndef __mcoldfire__
2492 lsrl IMM (1),d0
2493 roxrl IMM (1),d1
2494 addw IMM (1),d4
2495 #else
2496 lsrl IMM (1),d1
2497 btst IMM (0),d0
2498 beq 10f
2499 bset IMM (31),d1
2500 10: lsrl IMM (1),d0
2501 addl IMM (1),d4
2502 #endif
2504 | If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
2505 | have to put the exponent to zero and return a denormalized number.
2506 btst IMM (DBL_MANT_DIG-32-1),d0
2507 beq 1f
2508 jmp a0@
2509 1: movel IMM (0),d4
2510 jmp a0@
| The remaining modes perform no adjustment: return as-is.
2512 Lround$to$zero:
2513 Lround$to$plus:
2514 Lround$to$minus:
2515 jmp a0@
2516 #endif /* L_double */
2518 #ifdef L_float
2520 .globl SYM (_fpCCR)
2521 .globl $_exception_handler
| IEEE single-precision special values and format parameters.
2523 QUIET_NaN = 0xffffffff
2524 SIGNL_NaN = 0x7f800001
2525 INFINITY = 0x7f800000
2527 F_MAX_EXP = 0xff
2528 F_BIAS = 126
2529 FLT_MAX_EXP = F_MAX_EXP - F_BIAS
2530 FLT_MIN_EXP = 1 - F_BIAS
2531 FLT_MANT_DIG = 24
| Exception flag bits, passed to $_exception_handler in d7 (see the
| Lf$* stubs below).
2533 INEXACT_RESULT = 0x0001
2534 UNDERFLOW = 0x0002
2535 OVERFLOW = 0x0004
2536 DIVIDE_BY_ZERO = 0x0008
2537 INVALID_OPERATION = 0x0010
| Format code, passed to $_exception_handler in d6.
2539 SINGLE_FLOAT = 1
| Operation codes; each routine loads its code into d5 before a failure exit
| (e.g. `moveq IMM (MULTIPLY),d5`).
2541 NOOP = 0
2542 ADD = 1
2543 MULTIPLY = 2
2544 DIVIDE = 3
2545 NEGATE = 4
2546 COMPARE = 5
2547 EXTENDSFDF = 6
2548 TRUNCDFSF = 7
2550 UNKNOWN = -1
| Rounding-mode codes, read from the word at _fpCCR+6 by the routines.
2551 ROUND_TO_NEAREST = 0 | round result to nearest representable value
2552 ROUND_TO_ZERO = 1 | round result towards zero
2553 ROUND_TO_PLUS = 2 | round result towards plus infinity
2554 ROUND_TO_MINUS = 3 | round result towards minus infinity
2556 | Entry points:
2558 .globl SYM (__addsf3)
2559 .globl SYM (__subsf3)
2560 .globl SYM (__mulsf3)
2561 .globl SYM (__divsf3)
2562 .globl SYM (__negsf2)
2563 .globl SYM (__cmpsf2)
2564 .globl SYM (__cmpsf2_internal)
2566 | These are common routines to return and signal exceptions.
2568 .text
2569 .even
| Shared single-float exit stubs. Each loads the exception flags into d7
| and the format code (SINGLE_FLOAT) into d6, then jumps to
| $_exception_handler. On entry, callers have set the operation code in d5
| and -- for the signed results -- the result sign bit in d7 (or'd into d0
| here before d7 is overwritten with the flags).
2571 Lf$den:
2572 | Return and signal a denormalized number
2573 orl d7,d0
2574 moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
2575 moveq IMM (SINGLE_FLOAT),d6
2576 PICJUMP $_exception_handler
2578 Lf$infty:
2579 Lf$overflow:
2580 | Return a properly signed INFINITY and set the exception flags
2581 movel IMM (INFINITY),d0
2582 orl d7,d0
2583 moveq IMM (INEXACT_RESULT+OVERFLOW),d7
2584 moveq IMM (SINGLE_FLOAT),d6
2585 PICJUMP $_exception_handler
2587 Lf$underflow:
2588 | Return 0 and set the exception flags
2589 moveq IMM (0),d0
2590 moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
2591 moveq IMM (SINGLE_FLOAT),d6
2592 PICJUMP $_exception_handler
2594 Lf$inop:
2595 | Return a quiet NaN and set the exception flags
2596 movel IMM (QUIET_NaN),d0
2597 moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
2598 moveq IMM (SINGLE_FLOAT),d6
2599 PICJUMP $_exception_handler
2601 Lf$div$0:
2602 | Return a properly signed INFINITY and set the exception flags
2603 movel IMM (INFINITY),d0
2604 orl d7,d0
2605 moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
2606 moveq IMM (SINGLE_FLOAT),d6
2607 PICJUMP $_exception_handler
2609 |=============================================================================
2610 |=============================================================================
2611 | single precision routines
2612 |=============================================================================
2613 |=============================================================================
2615 | A single precision floating point number (float) has the format:
2617 | struct _float {
2618 | unsigned int sign : 1; /* sign bit */
2619 | unsigned int exponent : 8; /* exponent, shifted by 126 */
2620 | unsigned int fraction : 23; /* fraction */
2621 | } float;
2623 | Thus sizeof(float) = 4 (32 bits).
2625 | All the routines are callable from C programs, and return the result
2626 | in the single register d0. They also preserve all registers except
2627 | d0-d1 and a0-a1.
2629 |=============================================================================
2630 | __subsf3
2631 |=============================================================================
| float __subsf3(float a, float b): computed as a + (-b).
| Flips the sign bit of the second argument in place on the stack and
| falls straight through into __addsf3 -- the two routines must stay
| adjacent in this order.
2633 | float __subsf3(float, float);
2634 FUNC(__subsf3)
2635 SYM (__subsf3):
2636 bchg IMM (31),sp@(8) | change sign of second operand
2637 | and fall through
2638 |=============================================================================
2639 | __addsf3
2640 |=============================================================================
| float __addsf3(float a, float b)
| In:  a6@(8) = a, a6@(12) = b (after link a6).
| Out: d0 = a + b, rounded per the mode stored in the word at _fpCCR+6.
| Register roles: a/sum in d0-d1 (two-register working pair), b in d2-d3,
| exponents in d6/d7; the sign bits are parked in a0/a1 and later swapped
| with the exponents. Special cases (zero, denormal, INFINITY/NaN) branch
| to the Laddsf$* labels below; error exits go through the Lf$* stubs.
| NOTE(review): the fused line numbers are discontinuous (e.g. 2709 -> 2710,
| 2751 -> 2752, 2995 -> 2997); upstream has numeric labels (2:, 3:, 4:, 5:,
| 6:, 7:, 8:) and `rts` instructions on the missing lines, which the
| branches here (bhi 5f, dbra d7,3b, bge 4f, bne 2b, ...) still reference.
| Restore them from upstream before assembling.
2642 | float __addsf3(float, float);
2643 FUNC(__addsf3)
2644 SYM (__addsf3):
2645 #ifndef __mcoldfire__
2646 link a6,IMM (0) | everything will be done in registers
2647 moveml d2-d7,sp@- | save all data registers but d0-d1
2648 #else
2649 link a6,IMM (-24)
2650 moveml d2-d7,sp@
2651 #endif
2652 movel a6@(8),d0 | get first operand
2653 movel a6@(12),d1 | get second operand
2654 movel d0,a0 | get d0's sign bit '
2655 addl d0,d0 | check and clear sign bit of a
2656 beq Laddsf$b | if zero return second operand
2657 movel d1,a1 | save b's sign bit '
2658 addl d1,d1 | get rid of sign bit
2659 beq Laddsf$a | if zero return first operand
2661 | Get the exponents and check for denormalized and/or infinity.
2663 movel IMM (0x00ffffff),d4 | mask to get fraction
2664 movel IMM (0x01000000),d5 | mask to put hidden bit back
2666 movel d0,d6 | save a to get exponent
2667 andl d4,d0 | get fraction in d0
2668 notl d4 | make d4 into a mask for the exponent
2669 andl d4,d6 | get exponent in d6
2670 beq Laddsf$a$den | branch if a is denormalized
2671 cmpl d4,d6 | check for INFINITY or NaN
2672 beq Laddsf$nf
2673 swap d6 | put exponent into first word
2674 orl d5,d0 | and put hidden bit back
2675 Laddsf$1:
2676 | Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
2677 movel d1,d7 | get exponent in d7
2678 andl d4,d7 |
2679 beq Laddsf$b$den | branch if b is denormalized
2680 cmpl d4,d7 | check for INFINITY or NaN
2681 beq Laddsf$nf
2682 swap d7 | put exponent into first word
2683 notl d4 | make d4 into a mask for the fraction
2684 andl d4,d1 | get fraction in d1
2685 orl d5,d1 | and put hidden bit back
2686 Laddsf$2:
2687 | Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
2689 | Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
2690 | shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
2691 | bit).
2693 movel d1,d2 | move b to d2, since we want to use
2694 | two registers to do the sum
2695 movel IMM (0),d1 | and clear the new ones
2696 movel d1,d3 |
2698 | Here we shift the numbers in registers d0 and d1 so the exponents are the
2699 | same, and put the largest exponent in d6. Note that we are using two
2700 | registers for each number (see the discussion by D. Knuth in "Seminumerical
2701 | Algorithms").
2702 #ifndef __mcoldfire__
2703 cmpw d6,d7 | compare exponents
2704 #else
2705 cmpl d6,d7 | compare exponents
2706 #endif
2707 beq Laddsf$3 | if equal don't shift '
2708 bhi 5f | branch if second exponent largest
2710 subl d6,d7 | keep the largest exponent
2711 negl d7
2712 #ifndef __mcoldfire__
2713 lsrw IMM (8),d7 | put difference in lower byte
2714 #else
2715 lsrl IMM (8),d7 | put difference in lower byte
2716 #endif
2717 | if difference is too large we don't shift (actually, we can just exit) '
2718 #ifndef __mcoldfire__
2719 cmpw IMM (FLT_MANT_DIG+2),d7
2720 #else
2721 cmpl IMM (FLT_MANT_DIG+2),d7
2722 #endif
2723 bge Laddsf$b$small
2724 #ifndef __mcoldfire__
2725 cmpw IMM (16),d7 | if difference >= 16 swap
2726 #else
2727 cmpl IMM (16),d7 | if difference >= 16 swap
2728 #endif
2729 bge 4f
2731 #ifndef __mcoldfire__
2732 subw IMM (1),d7
2733 #else
2734 subql IMM (1), d7
2735 #endif
| Bit-by-bit right shift of b (d2-d3) to align its exponent with a's.
2737 #ifndef __mcoldfire__
2738 lsrl IMM (1),d2 | shift right second operand
2739 roxrl IMM (1),d3
2740 dbra d7,3b
2741 #else
2742 lsrl IMM (1),d3
2743 btst IMM (0),d2
2744 beq 10f
2745 bset IMM (31),d3
2746 10: lsrl IMM (1),d2
2747 subql IMM (1), d7
2748 bpl 3b
2749 #endif
2750 bra Laddsf$3
| Fast path: move 16 bits at a time via word swaps when the exponent
| difference is at least 16.
2752 movew d2,d3
2753 swap d3
2754 movew d3,d2
2755 swap d2
2756 #ifndef __mcoldfire__
2757 subw IMM (16),d7
2758 #else
2759 subl IMM (16),d7
2760 #endif
2761 bne 2b | if still more bits, go back to normal case
2762 bra Laddsf$3
| Symmetric case: b's exponent is larger, so shift a (d0-d1) instead.
2764 #ifndef __mcoldfire__
2765 exg d6,d7 | exchange the exponents
2766 #else
2767 eorl d6,d7
2768 eorl d7,d6
2769 eorl d6,d7
2770 #endif
2771 subl d6,d7 | keep the largest exponent
2772 negl d7 |
2773 #ifndef __mcoldfire__
2774 lsrw IMM (8),d7 | put difference in lower byte
2775 #else
2776 lsrl IMM (8),d7 | put difference in lower byte
2777 #endif
2778 | if difference is too large we don't shift (and exit!) '
2779 #ifndef __mcoldfire__
2780 cmpw IMM (FLT_MANT_DIG+2),d7
2781 #else
2782 cmpl IMM (FLT_MANT_DIG+2),d7
2783 #endif
2784 bge Laddsf$a$small
2785 #ifndef __mcoldfire__
2786 cmpw IMM (16),d7 | if difference >= 16 swap
2787 #else
2788 cmpl IMM (16),d7 | if difference >= 16 swap
2789 #endif
2790 bge 8f
2792 #ifndef __mcoldfire__
2793 subw IMM (1),d7
2794 #else
2795 subl IMM (1),d7
2796 #endif
2798 #ifndef __mcoldfire__
2799 lsrl IMM (1),d0 | shift right first operand
2800 roxrl IMM (1),d1
2801 dbra d7,7b
2802 #else
2803 lsrl IMM (1),d1
2804 btst IMM (0),d0
2805 beq 10f
2806 bset IMM (31),d1
2807 10: lsrl IMM (1),d0
2808 subql IMM (1),d7
2809 bpl 7b
2810 #endif
2811 bra Laddsf$3
2813 movew d0,d1
2814 swap d1
2815 movew d1,d0
2816 swap d0
2817 #ifndef __mcoldfire__
2818 subw IMM (16),d7
2819 #else
2820 subl IMM (16),d7
2821 #endif
2822 bne 6b | if still more bits, go back to normal case
2823 | otherwise we fall through
2825 | Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
2826 | signs are stored in a0 and a1).
2828 Laddsf$3:
2829 | Here we have to decide whether to add or subtract the numbers
2830 #ifndef __mcoldfire__
2831 exg d6,a0 | get signs back
2832 exg d7,a1 | and save the exponents
2833 #else
2834 movel d6,d4
2835 movel a0,d6
2836 movel d4,a0
2837 movel d7,d4
2838 movel a1,d7
2839 movel d4,a1
2840 #endif
2841 eorl d6,d7 | combine sign bits
2842 bmi Lsubsf$0 | if negative a and b have opposite
2843 | sign so we actually subtract the
2844 | numbers
2846 | Here we have both positive or both negative
2847 #ifndef __mcoldfire__
2848 exg d6,a0 | now we have the exponent in d6
2849 #else
2850 movel d6,d4
2851 movel a0,d6
2852 movel d4,a0
2853 #endif
2854 movel a0,d7 | and sign in d7
2855 andl IMM (0x80000000),d7
2856 | Here we do the addition.
2857 addl d3,d1
2858 addxl d2,d0
2859 | Note: now we have d2, d3, d4 and d5 to play with!
2861 | Put the exponent, in the first byte, in d2, to use the "standard" rounding
2862 | routines:
2863 movel d6,d2
2864 #ifndef __mcoldfire__
2865 lsrw IMM (8),d2
2866 #else
2867 lsrl IMM (8),d2
2868 #endif
2870 | Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
2871 | the case of denormalized numbers in the rounding routine itself).
2872 | As in the addition (not in the subtraction!) we could have set
2873 | one more bit we check this:
2874 btst IMM (FLT_MANT_DIG+1),d0
2875 beq 1f
2876 #ifndef __mcoldfire__
2877 lsrl IMM (1),d0
2878 roxrl IMM (1),d1
2879 #else
2880 lsrl IMM (1),d1
2881 btst IMM (0),d0
2882 beq 10f
2883 bset IMM (31),d1
2884 10: lsrl IMM (1),d0
2885 #endif
2886 addl IMM (1),d2
| Dispatch to the rounding routine selected by the mode word at _fpCCR+6;
| the routine jumps back via a0 (here: Laddsf$4).
2888 lea pc@(Laddsf$4),a0 | to return from rounding routine
2889 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2890 #ifdef __mcoldfire__
2891 clrl d6
2892 #endif
2893 movew a1@(6),d6 | rounding mode in d6
2894 beq Lround$to$nearest
2895 #ifndef __mcoldfire__
2896 cmpw IMM (ROUND_TO_PLUS),d6
2897 #else
2898 cmpl IMM (ROUND_TO_PLUS),d6
2899 #endif
2900 bhi Lround$to$minus
2901 blt Lround$to$zero
2902 bra Lround$to$plus
2903 Laddsf$4:
2904 | Put back the exponent, but check for overflow.
2905 #ifndef __mcoldfire__
2906 cmpw IMM (0xff),d2
2907 #else
2908 cmpl IMM (0xff),d2
2909 #endif
2910 bhi 1f
2911 bclr IMM (FLT_MANT_DIG-1),d0
2912 #ifndef __mcoldfire__
2913 lslw IMM (7),d2
2914 #else
2915 lsll IMM (7),d2
2916 #endif
2917 swap d2
2918 orl d2,d0
2919 bra Laddsf$ret
| Exponent overflowed 0xff: signal overflow with operation code ADD.
2921 moveq IMM (ADD),d5
2922 bra Lf$overflow
2924 Lsubsf$0:
2925 | We are here if a > 0 and b < 0 (sign bits cleared).
2926 | Here we do the subtraction.
2927 movel d6,d7 | put sign in d7
2928 andl IMM (0x80000000),d7
2930 subl d3,d1 | result in d0-d1
2931 subxl d2,d0 |
2932 beq Laddsf$ret | if zero just exit
2933 bpl 1f | if positive skip the following
2934 bchg IMM (31),d7 | change sign bit in d7
2935 negl d1
2936 negxl d0
2938 #ifndef __mcoldfire__
2939 exg d2,a0 | now we have the exponent in d2
2940 lsrw IMM (8),d2 | put it in the first byte
2941 #else
2942 movel d2,d4
2943 movel a0,d2
2944 movel d4,a0
2945 lsrl IMM (8),d2 | put it in the first byte
2946 #endif
2948 | Now d0-d1 is positive and the sign bit is in d7.
2950 | Note that we do not have to normalize, since in the subtraction bit
2951 | #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
2952 | the rounding routines themselves.
2953 lea pc@(Lsubsf$1),a0 | to return from rounding routine
2954 PICLEA SYM (_fpCCR),a1 | check the rounding mode
2955 #ifdef __mcoldfire__
2956 clrl d6
2957 #endif
2958 movew a1@(6),d6 | rounding mode in d6
2959 beq Lround$to$nearest
2960 #ifndef __mcoldfire__
2961 cmpw IMM (ROUND_TO_PLUS),d6
2962 #else
2963 cmpl IMM (ROUND_TO_PLUS),d6
2964 #endif
2965 bhi Lround$to$minus
2966 blt Lround$to$zero
2967 bra Lround$to$plus
2968 Lsubsf$1:
2969 | Put back the exponent (we can't have overflow!). '
2970 bclr IMM (FLT_MANT_DIG-1),d0
2971 #ifndef __mcoldfire__
2972 lslw IMM (7),d2
2973 #else
2974 lsll IMM (7),d2
2975 #endif
2976 swap d2
2977 orl d2,d0
2978 bra Laddsf$ret
2980 | If one of the numbers was too small (difference of exponents >=
2981 | FLT_MANT_DIG+2) we return the other (and now we don't have to '
2982 | check for finiteness or zero).
2983 Laddsf$a$small:
2984 movel a6@(12),d0
2985 PICLEA SYM (_fpCCR),a0
2986 movew IMM (0),a0@
2987 #ifndef __mcoldfire__
2988 moveml sp@+,d2-d7 | restore data registers
2989 #else
2990 moveml sp@,d2-d7
2991 | XXX if frame pointer is ever removed, stack pointer must
2992 | be adjusted here.
2993 #endif
2994 unlk a6 | and return
2997 Laddsf$b$small:
2998 movel a6@(8),d0
2999 PICLEA SYM (_fpCCR),a0
3000 movew IMM (0),a0@
3001 #ifndef __mcoldfire__
3002 moveml sp@+,d2-d7 | restore data registers
3003 #else
3004 moveml sp@,d2-d7
3005 | XXX if frame pointer is ever removed, stack pointer must
3006 | be adjusted here.
3007 #endif
3008 unlk a6 | and return
3011 | If the numbers are denormalized remember to put exponent equal to 1.
3013 Laddsf$a$den:
3014 movel d5,d6 | d5 contains 0x01000000
3015 swap d6
3016 bra Laddsf$1
3018 Laddsf$b$den:
3019 movel d5,d7
3020 swap d7
3021 notl d4 | make d4 into a mask for the fraction
3022 | (this was not executed after the jump)
3023 bra Laddsf$2
3025 | The rest is mainly code for the different results which can be
3026 | returned (checking always for +/-INFINITY and NaN).
3028 Laddsf$b:
3029 | Return b (if a is zero).
3030 movel a6@(12),d0
3031 cmpl IMM (0x80000000),d0 | Check if b is -0
3032 bne 1f
3033 movel a0,d7
3034 andl IMM (0x80000000),d7 | Use the sign of a
3035 clrl d0
3036 bra Laddsf$ret
3037 Laddsf$a:
3038 | Return a (if b is zero).
3039 movel a6@(8),d0
3041 moveq IMM (ADD),d5
3042 | We have to check for NaN and +/-infty.
3043 movel d0,d7
3044 andl IMM (0x80000000),d7 | put sign in d7
3045 bclr IMM (31),d0 | clear sign
3046 cmpl IMM (INFINITY),d0 | check for infty or NaN
3047 bge 2f
3048 movel d0,d0 | check for zero (we do this because we don't '
3049 bne Laddsf$ret | want to return -0 by mistake
3050 bclr IMM (31),d7 | if zero be sure to clear sign
3051 bra Laddsf$ret | if everything OK just return
3053 | The value to be returned is either +/-infty or NaN
3054 andl IMM (0x007fffff),d0 | check for NaN
3055 bne Lf$inop | if mantissa not zero is NaN
3056 bra Lf$infty
3058 Laddsf$ret:
3059 | Normal exit (a and b nonzero, result is not NaN nor +/-infty).
3060 | We have to clear the exception flags (just the exception type).
3061 PICLEA SYM (_fpCCR),a0
3062 movew IMM (0),a0@
3063 orl d7,d0 | put sign bit
3064 #ifndef __mcoldfire__
3065 moveml sp@+,d2-d7 | restore data registers
3066 #else
3067 moveml sp@,d2-d7
3068 | XXX if frame pointer is ever removed, stack pointer must
3069 | be adjusted here.
3070 #endif
3071 unlk a6 | and return
3074 Laddsf$ret$den:
3075 | Return a denormalized number (for addition we don't signal underflow) '
3076 lsrl IMM (1),d0 | remember to shift right back once
3077 bra Laddsf$ret | and return
3079 | Note: when adding two floats of the same sign if either one is
3080 | NaN we return NaN without regard to whether the other is finite or
3081 | not. When subtracting them (i.e., when adding two numbers of
3082 | opposite signs) things are more complicated: if both are INFINITY
3083 | we return NaN, if only one is INFINITY and the other is NaN we return
3084 | NaN, but if it is finite we return INFINITY with the corresponding sign.
3086 Laddsf$nf:
3087 moveq IMM (ADD),d5
3088 | This could be faster but it is not worth the effort, since it is not
3089 | executed very often. We sacrifice speed for clarity here.
3090 movel a6@(8),d0 | get the numbers back (remember that we
3091 movel a6@(12),d1 | did some processing already)
3092 movel IMM (INFINITY),d4 | useful constant (INFINITY)
3093 movel d0,d2 | save sign bits
3094 movel d1,d3
3095 bclr IMM (31),d0 | clear sign bits
3096 bclr IMM (31),d1
3097 | We know that one of them is either NaN of +/-INFINITY
3098 | Check for NaN (if either one is NaN return NaN)
3099 cmpl d4,d0 | check first a (d0)
3100 bhi Lf$inop
3101 cmpl d4,d1 | check now b (d1)
3102 bhi Lf$inop
3103 | Now comes the check for +/-INFINITY. We know that both are (maybe not
3104 | finite) numbers, but we have to check if both are infinite whether we
3105 | are adding or subtracting them.
3106 eorl d3,d2 | to check sign bits
3107 bmi 1f
3108 movel d0,d7
3109 andl IMM (0x80000000),d7 | get (common) sign bit
3110 bra Lf$infty
3112 | We know one (or both) are infinite, so we test for equality between the
3113 | two numbers (if they are equal they have to be infinite both, so we
3114 | return NaN).
3115 cmpl d1,d0 | are both infinite?
3116 beq Lf$inop | if so return NaN
3118 movel d0,d7
3119 andl IMM (0x80000000),d7 | get a's sign bit '
3120 cmpl d4,d0 | test now for infinity
3121 beq Lf$infty | if a is INFINITY return with this sign
3122 bchg IMM (31),d7 | else we know b is INFINITY and has
3123 bra Lf$infty | the opposite sign
3125 |=============================================================================
3126 | __mulsf3
3127 |=============================================================================
| float __mulsf3(float a, float b)
| In:  a6@(8) = a, a6@(12) = b.  Out: d0 = a * b.
| Register roles: d7 = sign of the product, d6 = INFINITY (exponent mask),
| d5 = fraction mask (~INFINITY), d4 = hidden-bit mask (0x00800000);
| exponents are gathered in d2/d3, and the 48-bit product is accumulated in
| d0-d1 by a shift-and-add loop over FLT_MANT_DIG bits. Rounding and
| overflow/underflow handling are finished at Lround$exit (defined later in
| this section, not visible in this excerpt).
| NOTE(review): as elsewhere in this copy, the fused line numbers are
| discontinuous and label-only/`rts` lines from upstream are missing --
| confirm against the upstream lb1sf68.asm before assembling.
3129 | float __mulsf3(float, float);
3130 FUNC(__mulsf3)
3131 SYM (__mulsf3):
3132 #ifndef __mcoldfire__
3133 link a6,IMM (0)
3134 moveml d2-d7,sp@-
3135 #else
3136 link a6,IMM (-24)
3137 moveml d2-d7,sp@
3138 #endif
3139 movel a6@(8),d0 | get a into d0
3140 movel a6@(12),d1 | and b into d1
3141 movel d0,d7 | d7 will hold the sign of the product
3142 eorl d1,d7 |
3143 andl IMM (0x80000000),d7
3144 movel IMM (INFINITY),d6 | useful constant (+INFINITY)
3145 movel d6,d5 | another (mask for fraction)
3146 notl d5 |
3147 movel IMM (0x00800000),d4 | this is to put hidden bit back
3148 bclr IMM (31),d0 | get rid of a's sign bit '
3149 movel d0,d2 |
3150 beq Lmulsf$a$0 | branch if a is zero
3151 bclr IMM (31),d1 | get rid of b's sign bit '
3152 movel d1,d3 |
3153 beq Lmulsf$b$0 | branch if b is zero
3154 cmpl d6,d0 | is a big?
3155 bhi Lmulsf$inop | if a is NaN return NaN
3156 beq Lmulsf$inf | if a is INFINITY we have to check b
3157 cmpl d6,d1 | now compare b with INFINITY
3158 bhi Lmulsf$inop | is b NaN?
3159 beq Lmulsf$overflow | is b INFINITY?
3160 | Here we have both numbers finite and nonzero (and with no sign bit).
3161 | Now we get the exponents into d2 and d3.
3162 andl d6,d2 | and isolate exponent in d2
3163 beq Lmulsf$a$den | if exponent is zero we have a denormalized
3164 andl d5,d0 | and isolate fraction
3165 orl d4,d0 | and put hidden bit back
3166 swap d2 | I like exponents in the first byte
3167 #ifndef __mcoldfire__
3168 lsrw IMM (7),d2 |
3169 #else
3170 lsrl IMM (7),d2 |
3171 #endif
3172 Lmulsf$1: | number
3173 andl d6,d3 |
3174 beq Lmulsf$b$den |
3175 andl d5,d1 |
3176 orl d4,d1 |
3177 swap d3 |
3178 #ifndef __mcoldfire__
3179 lsrw IMM (7),d3 |
3180 #else
3181 lsrl IMM (7),d3 |
3182 #endif
3183 Lmulsf$2: |
3184 #ifndef __mcoldfire__
3185 addw d3,d2 | add exponents
3186 subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
3187 #else
3188 addl d3,d2 | add exponents
3189 subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
3190 #endif
3192 | We are now ready to do the multiplication. The situation is as follows:
3193 | both a and b have bit FLT_MANT_DIG-1 set (even if they were
3194 | denormalized to start with!), which means that in the product
3195 | bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
3196 | high long) is set.
3198 | To do the multiplication let us move the number a little bit around ...
3199 movel d1,d6 | second operand in d6
3200 movel d0,d5 | first operand in d4-d5
3201 movel IMM (0),d4
3202 movel d4,d1 | the sums will go in d0-d1
3203 movel d4,d0
3205 | now bit FLT_MANT_DIG-1 becomes bit 31:
3206 lsll IMM (31-FLT_MANT_DIG+1),d6
3208 | Start the loop (we loop #FLT_MANT_DIG times):
3209 moveq IMM (FLT_MANT_DIG-1),d3
3210 1: addl d1,d1 | shift sum
3211 addxl d0,d0
3212 lsll IMM (1),d6 | get bit bn
3213 bcc 2f | if not set skip sum
3214 addl d5,d1 | add a
3215 addxl d4,d0
3217 #ifndef __mcoldfire__
3218 dbf d3,1b | loop back
3219 #else
3220 subql IMM (1),d3
3221 bpl 1b
3222 #endif
3224 | Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
3225 | (mod 32) of d0 set. The first thing to do now is to normalize it so bit
3226 | FLT_MANT_DIG is set (to do the rounding).
3227 #ifndef __mcoldfire__
3228 rorl IMM (6),d1
3229 swap d1
3230 movew d1,d3
3231 andw IMM (0x03ff),d3
3232 andw IMM (0xfd00),d1
3233 #else
3234 movel d1,d3
3235 lsll IMM (8),d1
3236 addl d1,d1
3237 addl d1,d1
3238 moveq IMM (22),d5
3239 lsrl d5,d3
3240 orl d3,d1
3241 andl IMM (0xfffffd00),d1
3242 #endif
3243 lsll IMM (8),d0
3244 addl d0,d0
3245 addl d0,d0
3246 #ifndef __mcoldfire__
3247 orw d3,d0
3248 #else
3249 orl d3,d0
3250 #endif
3252 moveq IMM (MULTIPLY),d5
| If the product overflowed into bit FLT_MANT_DIG+1, shift right once and
| bump the exponent before handing off to the common rounding exit.
3254 btst IMM (FLT_MANT_DIG+1),d0
3255 beq Lround$exit
3256 #ifndef __mcoldfire__
3257 lsrl IMM (1),d0
3258 roxrl IMM (1),d1
3259 addw IMM (1),d2
3260 #else
3261 lsrl IMM (1),d1
3262 btst IMM (0),d0
3263 beq 10f
3264 bset IMM (31),d1
3265 10: lsrl IMM (1),d0
3266 addql IMM (1),d2
3267 #endif
3268 bra Lround$exit
3270 Lmulsf$inop:
3271 moveq IMM (MULTIPLY),d5
3272 bra Lf$inop
3274 Lmulsf$overflow:
3275 moveq IMM (MULTIPLY),d5
3276 bra Lf$overflow
3278 Lmulsf$inf:
3279 moveq IMM (MULTIPLY),d5
3280 | If either is NaN return NaN; else both are (maybe infinite) numbers, so
3281 | return INFINITY with the correct sign (which is in d7).
3282 cmpl d6,d1 | is b NaN?
3283 bhi Lf$inop | if so return NaN
3284 bra Lf$overflow | else return +/-INFINITY
3286 | If either number is zero return zero, unless the other is +/-INFINITY,
3287 | or NaN, in which case we return NaN.
3288 Lmulsf$b$0:
3289 | Here d1 (==b) is zero.
3290 movel a6@(8),d1 | get a again to check for non-finiteness
3291 bra 1f
3292 Lmulsf$a$0:
3293 movel a6@(12),d1 | get b again to check for non-finiteness
3294 1: bclr IMM (31),d1 | clear sign bit
3295 cmpl IMM (INFINITY),d1 | and check for a large exponent
3296 bge Lf$inop | if b is +/-INFINITY or NaN return NaN
3297 movel d7,d0 | else return signed zero
3298 PICLEA SYM (_fpCCR),a0 |
3299 movew IMM (0),a0@ |
3300 #ifndef __mcoldfire__
3301 moveml sp@+,d2-d7 |
3302 #else
3303 moveml sp@,d2-d7
3304 | XXX if frame pointer is ever removed, stack pointer must
3305 | be adjusted here.
3306 #endif
3307 unlk a6 |
3308 rts |
3310 | If a number is denormalized we put an exponent of 1 but do not put the
3311 | hidden bit back into the fraction; instead we shift left until bit 23
3312 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
3313 | to ensure that the product of the fractions is close to 1.
3314 Lmulsf$a$den:
3315 movel IMM (1),d2
3316 andl d5,d0
3317 1: addl d0,d0 | shift a left (until bit 23 is set)
3318 #ifndef __mcoldfire__
3319 subw IMM (1),d2 | and adjust exponent
3320 #else
3321 subql IMM (1),d2 | and adjust exponent
3322 #endif
3323 btst IMM (FLT_MANT_DIG-1),d0
3324 bne Lmulsf$1 |
3325 bra 1b | else loop back
3327 Lmulsf$b$den:
3328 movel IMM (1),d3
3329 andl d5,d1
3330 1: addl d1,d1 | shift b left until bit 23 is set
3331 #ifndef __mcoldfire__
3332 subw IMM (1),d3 | and adjust exponent
3333 #else
3334 subql IMM (1),d3 | and adjust exponent
3335 #endif
3336 btst IMM (FLT_MANT_DIG-1),d1
3337 bne Lmulsf$2 |
3338 bra 1b | else loop back
3340 |=============================================================================
3341 | __divsf3
3342 |=============================================================================
3344 | float __divsf3(float, float);
3345 FUNC(__divsf3)
3346 SYM (__divsf3):
3347 #ifndef __mcoldfire__
3348 link a6,IMM (0)
3349 moveml d2-d7,sp@-
3350 #else
3351 link a6,IMM (-24)
3352 moveml d2-d7,sp@
3353 #endif
3354 movel a6@(8),d0 | get a into d0
3355 movel a6@(12),d1 | and b into d1
3356 movel d0,d7 | d7 will hold the sign of the result
3357 eorl d1,d7 |
3358 andl IMM (0x80000000),d7 |
3359 movel IMM (INFINITY),d6 | useful constant (+INFINITY)
3360 movel d6,d5 | another (mask for fraction)
3361 notl d5 |
3362 movel IMM (0x00800000),d4 | this is to put hidden bit back
3363 bclr IMM (31),d0 | get rid of a's sign bit '
3364 movel d0,d2 |
3365 beq Ldivsf$a$0 | branch if a is zero
| ---------------------------------------------------------------------
| __divsf3 (continuation).  From the visible code paths: d0 = a and
| d1 = b with sign bits cleared, d7 = sign of the result, d4 = hidden
| bit (1 << (FLT_MANT_DIG-1)), d5 = fraction mask, d6 = INFINITY
| (0x7f800000, which is also the exponent mask).
| NOTE(review): the entry code of __divsf3 lies above this excerpt, so
| the register setup is inferred from usage here -- confirm upstream.
| NOTE(review): some short lines ("1:", "3:" labels) appear to have
| been dropped from this copy of the file; verify against upstream.
| ---------------------------------------------------------------------
3366 bclr IMM (31),d1 | get rid of b's sign bit '
3367 movel d1,d3 |
3368 beq Ldivsf$b$0 | branch if b is zero
3369 cmpl d6,d0 | is a big?
3370 bhi Ldivsf$inop | if a is NaN return NaN
3371 beq Ldivsf$inf | if a is INFINITY we have to check b
3372 cmpl d6,d1 | now compare b with INFINITY
3373 bhi Ldivsf$inop | if b is NaN return NaN
3374 beq Ldivsf$underflow | finite / INFINITY underflows to zero
3375 | Here we have both numbers finite and nonzero (and with no sign bit).
3376 | Now we get the exponents into d2 and d3 and normalize the numbers to
3377 | ensure that the ratio of the fractions is close to 1. We do this by
3378 | making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
3379 andl d6,d2 | and isolate exponent in d2
3380 beq Ldivsf$a$den | if exponent is zero we have a denormalized
3381 andl d5,d0 | and isolate fraction
3382 orl d4,d0 | and put hidden bit back
3383 swap d2 | I like exponents in the first byte
3384 #ifndef __mcoldfire__
3385 lsrw IMM (7),d2 |
3386 #else
3387 lsrl IMM (7),d2 | ColdFire lacks the word form of this shift
3388 #endif
3389 Ldivsf$1: |
3390 andl d6,d3 | same normalization for b's exponent, in d3
3391 beq Ldivsf$b$den |
3392 andl d5,d1 |
3393 orl d4,d1 |
3394 swap d3 |
3395 #ifndef __mcoldfire__
3396 lsrw IMM (7),d3 |
3397 #else
3398 lsrl IMM (7),d3 |
3399 #endif
3400 Ldivsf$2: |
3401 #ifndef __mcoldfire__
3402 subw d3,d2 | subtract exponents
3403 addw IMM (F_BIAS),d2 | and add bias
3404 #else
3405 subl d3,d2 | subtract exponents
3406 addl IMM (F_BIAS),d2 | and add bias
3407 #endif
3409 | We are now ready to do the division. We have prepared things in such a way
3410 | that the ratio of the fractions will be less than 2 but greater than 1/2.
3411 | At this point the registers in use are:
3412 | d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
3413 | d1 holds b (second operand, bit FLT_MANT_DIG=1)
3414 | d2 holds the difference of the exponents, corrected by the bias
3415 | d7 holds the sign of the ratio
3416 | d4, d5, d6 hold some constants
3417 movel d7,a0 | d6-d7 will hold the ratio of the fractions
3418 movel IMM (0),d6 |
3419 movel d6,d7
| Restoring (shift-and-subtract) division, one quotient bit per pass:
| if a >= b, set the current quotient bit and subtract; then a *= 2.
3421 moveq IMM (FLT_MANT_DIG+1),d3
3422 1: cmpl d0,d1 | is a < b?
3423 bhi 2f |
3424 bset d3,d6 | set a bit in d6
3425 subl d1,d0 | if a >= b a <-- a-b
3426 beq 3f | if a is zero, exit
3427 2: addl d0,d0 | multiply a by 2
3428 #ifndef __mcoldfire__
3429 dbra d3,1b
3430 #else
3431 subql IMM (1),d3 | ColdFire has no dbra
3432 bpl 1b
3433 #endif
3435 | Now we keep going to set the sticky bit ...
3436 moveq IMM (FLT_MANT_DIG),d3
3437 1: cmpl d0,d1
3438 ble 2f
3439 addl d0,d0
3440 #ifndef __mcoldfire__
3441 dbra d3,1b
3442 #else
3443 subql IMM(1),d3
3444 bpl 1b
3445 #endif
3446 movel IMM (0),d1 | remainder never reached b: no sticky bits
3447 bra 3f
3448 2: movel IMM (0),d1 | nonzero remainder: record one sticky bit
3449 #ifndef __mcoldfire__
3450 subw IMM (FLT_MANT_DIG),d3
3451 addw IMM (31),d3
3452 #else
3453 subl IMM (FLT_MANT_DIG),d3
3454 addl IMM (31),d3
3455 #endif
3456 bset d3,d1
| NOTE(review): the "3:" label targeted by the "beq 3f"/"bra 3f"
| branches above appears to be missing from this copy (line 3457).
3458 movel d6,d0 | put the ratio in d0-d1
3459 movel a0,d7 | get sign back
3461 | Because of the normalization we did before we are guaranteed that
3462 | d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
3463 | bit 25 could be set, and if it is not set then bit 24 is necessarily set.
3464 btst IMM (FLT_MANT_DIG+1),d0
3465 beq 1f | if it is not set, then bit 24 is set
3466 lsrl IMM (1),d0 | shift down one and bump the exponent
3467 #ifndef __mcoldfire__
3468 addw IMM (1),d2 |
3469 #else
3470 addl IMM (1),d2 |
3471 #endif
| NOTE(review): the "1:" label for the "beq 1f" above appears to be
| missing from this copy (line 3472).
3473 | Now round, check for over- and underflow, and exit.
3474 moveq IMM (DIVIDE),d5 | tell Lround$exit which operation this is
3475 bra Lround$exit
| ---------------------------------------------------------------------
| __divsf3 special-case exits.  Each loads DIVIDE into d5 so the shared
| Lf$* handlers (defined elsewhere in this file) can report which
| operation caused the condition.
| ---------------------------------------------------------------------
3477 Ldivsf$inop:
3478 moveq IMM (DIVIDE),d5
3479 bra Lf$inop | NaN operand: return NaN, signal INVALID_OPERATION
3481 Ldivsf$overflow:
3482 moveq IMM (DIVIDE),d5
3483 bra Lf$overflow
3485 Ldivsf$underflow:
3486 moveq IMM (DIVIDE),d5
3487 bra Lf$underflow
3489 Ldivsf$a$0:
3490 moveq IMM (DIVIDE),d5
3491 | If a is zero check to see whether b is zero also. In that case return
3492 | NaN; then check if b is NaN, and return NaN also in that case. Else
3493 | return a properly signed zero.
3494 andl IMM (0x7fffffff),d1 | clear sign bit and test b
3495 beq Lf$inop | if b is also zero return NaN
3496 cmpl IMM (INFINITY),d1 | check for NaN
3497 bhi Lf$inop |
3498 movel d7,d0 | else return signed zero
3499 PICLEA SYM (_fpCCR),a0 | store 0 to _fpCCR (no exception flagged)
3500 movew IMM (0),a0@ |
3501 #ifndef __mcoldfire__
3502 moveml sp@+,d2-d7 |
3503 #else
3504 moveml sp@,d2-d7 | ColdFire moveml cannot post-increment
3505 | XXX if frame pointer is ever removed, stack pointer must
3506 | be adjusted here.
3507 #endif
3508 unlk a6 |
3509 rts |
3511 Ldivsf$b$0:
3512 moveq IMM (DIVIDE),d5
3513 | If we got here a is not zero. Check if a is NaN; in that case return NaN,
3514 | else return +/-INFINITY. Remember that a is in d0 with the sign bit
3515 | cleared already.
3516 cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
3517 bhi Lf$inop | if larger it is NaN
3518 bra Lf$div$0 | else signal DIVIDE_BY_ZERO
3520 Ldivsf$inf:
3521 moveq IMM (DIVIDE),d5
3522 | If a is INFINITY we have to check b
3523 cmpl IMM (INFINITY),d1 | compare b with INFINITY
3524 bge Lf$inop | if b is NaN or INFINITY return NaN
3525 bra Lf$overflow | else return overflow
3527 | If a number is denormalized we put an exponent of 1 but do not put the
3528 | bit back into the fraction.
3529 Ldivsf$a$den:
3530 movel IMM (1),d2
3531 andl d5,d0 | keep only the fraction bits of a
3532 1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
3533 #ifndef __mcoldfire__
3534 subw IMM (1),d2 | and adjust exponent
3535 #else
3536 subl IMM (1),d2 | and adjust exponent
3537 #endif
3538 btst IMM (FLT_MANT_DIG-1),d0
3539 bne Ldivsf$1 | normalized: rejoin the main path
3540 bra 1b
3542 Ldivsf$b$den:
3543 movel IMM (1),d3
3544 andl d5,d1 | keep only the fraction bits of b
3545 1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
3546 #ifndef __mcoldfire__
3547 subw IMM (1),d3 | and adjust exponent
3548 #else
3549 subl IMM (1),d3 | and adjust exponent
3550 #endif
3551 btst IMM (FLT_MANT_DIG-1),d1
3552 bne Ldivsf$2 | normalized: rejoin the main path
3553 bra 1b
3555 Lround$exit:
3556 | This is a common exit point for __mulsf3 and __divsf3.
| On entry (per the callers): d0-d1 = 64-bit fraction, d2 = biased
| exponent, d7 = sign, d5 = operation code for exception reporting.
| NOTE(review): a "1:" loop label (line 3576) and the final "rts"
| (line 3661) appear to be missing from this copy -- verify upstream.
3558 | First check for underflow in the exponent:
3559 #ifndef __mcoldfire__
3560 cmpw IMM (-FLT_MANT_DIG-1),d2
3561 #else
3562 cmpl IMM (-FLT_MANT_DIG-1),d2
3563 #endif
3564 blt Lf$underflow
3565 | It could happen that the exponent is less than 1, in which case the
3566 | number is denormalized. In this case we shift right and adjust the
3567 | exponent until it becomes 1 or the fraction is zero (in the latter case
3568 | we signal underflow and return zero).
3569 movel IMM (0),d6 | d6 is used temporarily
3570 #ifndef __mcoldfire__
3571 cmpw IMM (1),d2 | if the exponent is less than 1 we
3572 #else
3573 cmpl IMM (1),d2 | if the exponent is less than 1 we
3574 #endif
3575 bge 2f | have to shift right (denormalize)
3577 #ifndef __mcoldfire__
3578 addw IMM (1),d2 | adjust the exponent
3579 lsrl IMM (1),d0 | shift right once
3580 roxrl IMM (1),d1 |
3581 roxrl IMM (1),d6 | d6 collects bits we would lose otherwise
3582 cmpw IMM (1),d2 | is the exponent 1 already?
3583 #else
| ColdFire has no roxrl: propagate each shifted-out low bit into the
| top of the next lower register by hand.
3584 addql IMM (1),d2 | adjust the exponent
3585 lsrl IMM (1),d6
3586 btst IMM (0),d1
3587 beq 11f
3588 bset IMM (31),d6
3589 11: lsrl IMM (1),d1
3590 btst IMM (0),d0
3591 beq 10f
3592 bset IMM (31),d1
3593 10: lsrl IMM (1),d0
3594 cmpl IMM (1),d2 | is the exponent 1 already?
3595 #endif
3596 beq 2f | if not loop back
3597 bra 1b |
3598 bra Lf$underflow | safety check, shouldn't execute '
3599 2: orl d6,d1 | this is a trick so we don't lose '
3600 | the extra bits which were flushed right
3601 | Now call the rounding routine (which takes care of denormalized numbers):
3602 lea pc@(Lround$0),a0 | to return from rounding routine
3603 PICLEA SYM (_fpCCR),a1 | check the rounding mode
3604 #ifdef __mcoldfire__
3605 clrl d6 | movew below only writes the low word
3606 #endif
3607 movew a1@(6),d6 | rounding mode in d6
3608 beq Lround$to$nearest
3609 #ifndef __mcoldfire__
3610 cmpw IMM (ROUND_TO_PLUS),d6
3611 #else
3612 cmpl IMM (ROUND_TO_PLUS),d6
3613 #endif
3614 bhi Lround$to$minus
3615 blt Lround$to$zero
3616 bra Lround$to$plus
3617 Lround$0:
3618 | Here we have a correctly rounded result (either normalized or denormalized).
3620 | Here we should have either a normalized number or a denormalized one, and
3621 | the exponent is necessarily larger or equal to 1 (so we don't have to '
3622 | check again for underflow!). We have to check for overflow or for a
3623 | denormalized number (which also signals underflow).
3624 | Check for overflow (i.e., exponent >= 255).
3625 #ifndef __mcoldfire__
3626 cmpw IMM (0x00ff),d2
3627 #else
3628 cmpl IMM (0x00ff),d2
3629 #endif
3630 bge Lf$overflow
3631 | Now check for a denormalized number (exponent==0).
3632 movew d2,d2 | tst idiom: set flags on the low word of d2
3633 beq Lf$den
3635 | Put back the exponents and sign and return.
3636 #ifndef __mcoldfire__
3637 lslw IMM (7),d2 | exponent back to fourth byte
3638 #else
3639 lsll IMM (7),d2 | exponent back to fourth byte
3640 #endif
3641 bclr IMM (FLT_MANT_DIG-1),d0 | drop the hidden bit
3642 swap d0 | and put back exponent
3643 #ifndef __mcoldfire__
3644 orw d2,d0 |
3645 #else
3646 orl d2,d0
3647 #endif
3648 swap d0 |
3649 orl d7,d0 | and sign also
3651 PICLEA SYM (_fpCCR),a0 | store 0 to _fpCCR (no exception flagged)
3652 movew IMM (0),a0@
3653 #ifndef __mcoldfire__
3654 moveml sp@+,d2-d7
3655 #else
3656 moveml sp@,d2-d7
3657 | XXX if frame pointer is ever removed, stack pointer must
3658 | be adjusted here.
3659 #endif
3660 unlk a6
3663 |=============================================================================
3664 | __negsf2
3665 |=============================================================================
3667 | This is trivial and could be shorter if we didn't bother checking for NaN '
3668 | and +/-INFINITY.
3670 | float __negsf2(float);
| Returns the argument with its sign bit flipped; -0 and +0 both come
| back as +0, NaN traps to Lf$inop, +/-INFINITY goes through Lf$infty.
| NOTE(review): the "rts" after "unlk a6" (line 3703) appears to be
| missing from this copy -- verify against upstream.
3671 FUNC(__negsf2)
3672 SYM (__negsf2):
3673 #ifndef __mcoldfire__
3674 link a6,IMM (0)
3675 moveml d2-d7,sp@-
3676 #else
3677 link a6,IMM (-24) | ColdFire: reserve frame, store without predecrement
3678 moveml d2-d7,sp@
3679 #endif
3680 moveq IMM (NEGATE),d5 | operation code for exception reporting
3681 movel a6@(8),d0 | get number to negate in d0
3682 bchg IMM (31),d0 | negate
3683 movel d0,d1 | make a positive copy
3684 bclr IMM (31),d1 |
3685 tstl d1 | check for zero
3686 beq 2f | if zero (either sign) return +zero
3687 cmpl IMM (INFINITY),d1 | compare to +INFINITY
3688 blt 1f | ordinary finite number: return it
3689 bhi Lf$inop | if larger (fraction not zero) is NaN
3690 movel d0,d7 | else get sign and return INFINITY
3691 andl IMM (0x80000000),d7
3692 bra Lf$infty
3693 1: PICLEA SYM (_fpCCR),a0 | store 0 to _fpCCR (no exception flagged)
3694 movew IMM (0),a0@
3695 #ifndef __mcoldfire__
3696 moveml sp@+,d2-d7
3697 #else
3698 moveml sp@,d2-d7
3699 | XXX if frame pointer is ever removed, stack pointer must
3700 | be adjusted here.
3701 #endif
3702 unlk a6
3704 2: bclr IMM (31),d0 | force +0
3705 bra 1b
3707 |=============================================================================
3708 | __cmpsf2
3709 |=============================================================================
| Result encoding shared by all the comparison wrappers below.
3711 GREATER = 1
3712 LESS = -1
3713 EQUAL = 0
3715 | int __cmpsf2_internal(float, float, int);
| Compares a and b; returns GREATER/LESS/EQUAL in d0.  The third
| argument is the value to return if either operand is NaN (the
| wrappers pass 1 or -1 so unordered compares come out false).
| NOTE(review): two "1:" labels (lines 3749, 3761) and the "rts"
| instructions after each "unlk a6" appear to be missing from this
| copy of the file -- verify against upstream.
3716 SYM (__cmpsf2_internal):
3717 #ifndef __mcoldfire__
3718 link a6,IMM (0)
3719 moveml d2-d7,sp@- | save registers
3720 #else
3721 link a6,IMM (-24)
3722 moveml d2-d7,sp@
3723 #endif
3724 moveq IMM (COMPARE),d5 | operation code for exception reporting
3725 movel a6@(8),d0 | get first operand
3726 movel a6@(12),d1 | get second operand
3727 | Check if either is NaN, and in that case return garbage and signal
3728 | INVALID_OPERATION. Check also if either is zero, and clear the signs
3729 | if necessary.
3730 movel d0,d6 | d6 keeps a with its sign
3731 andl IMM (0x7fffffff),d0
3732 beq Lcmpsf$a$0
3733 cmpl IMM (0x7f800000),d0
3734 bhi Lcmpf$inop
3735 Lcmpsf$1:
3736 movel d1,d7 | d7 keeps b with its sign
3737 andl IMM (0x7fffffff),d1
3738 beq Lcmpsf$b$0
3739 cmpl IMM (0x7f800000),d1
3740 bhi Lcmpf$inop
3741 Lcmpsf$2:
3742 | Check the signs
3743 eorl d6,d7 | sign of the xor = "signs differ"
3744 bpl 1f
3745 | If the signs are not equal check if a >= 0
3746 tstl d6
3747 bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
3748 bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
3750 | If the signs are equal check for < 0
3751 tstl d6
3752 bpl 1f
3753 | If both are negative exchange them
3754 #ifndef __mcoldfire__
3755 exg d0,d1
3756 #else
3757 movel d0,d7 | ColdFire has no exg: swap via d7
3758 movel d1,d0
3759 movel d7,d1
3760 #endif
3762 | Now that they are positive we just compare them as longs (does this also
3763 | work for denormalized numbers?).
3764 cmpl d0,d1
3765 bhi Lcmpsf$b$gt$a | |b| > |a|
3766 bne Lcmpsf$a$gt$b | |b| < |a|
3767 | If we got here a == b.
3768 movel IMM (EQUAL),d0
3769 #ifndef __mcoldfire__
3770 moveml sp@+,d2-d7 | put back the registers
3771 #else
3772 moveml sp@,d2-d7
3773 #endif
3774 unlk a6
3776 Lcmpsf$a$gt$b:
3777 movel IMM (GREATER),d0
3778 #ifndef __mcoldfire__
3779 moveml sp@+,d2-d7 | put back the registers
3780 #else
3781 moveml sp@,d2-d7
3782 | XXX if frame pointer is ever removed, stack pointer must
3783 | be adjusted here.
3784 #endif
3785 unlk a6
3787 Lcmpsf$b$gt$a:
3788 movel IMM (LESS),d0
3789 #ifndef __mcoldfire__
3790 moveml sp@+,d2-d7 | put back the registers
3791 #else
3792 moveml sp@,d2-d7
3793 | XXX if frame pointer is ever removed, stack pointer must
3794 | be adjusted here.
3795 #endif
3796 unlk a6
3799 Lcmpsf$a$0: | a == +/-0: clear its sign so -0 compares equal to +0
3800 bclr IMM (31),d6
3801 bra Lcmpsf$1
3802 Lcmpsf$b$0: | likewise for b
3803 bclr IMM (31),d7
3804 bra Lcmpsf$2
3806 Lcmpf$inop: | NaN operand: return the caller-supplied third argument
3807 movl a6@(16),d0
3808 moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
3809 moveq IMM (SINGLE_FLOAT),d6
3810 PICJUMP $_exception_handler
3812 | int __cmpsf2(float, float);
| __cmpsf2(a, b): wrapper returning __cmpsf2_internal(a, b, 1); the
| trailing 1 is the value reported when the operands are unordered.
3813 FUNC(__cmpsf2)
3814 SYM (__cmpsf2):
3815 link a6,IMM (0)
3816 pea 1 | push unordered-result value (third argument)
3817 movl a6@(12),sp@- | re-push b, then a
3818 movl a6@(8),sp@-
| Call through PICCALL/SYM like every other wrapper in this file: the
| previous bare "bsr (__cmpsf2_internal)" bypassed both the SYM() name
| decoration (the label is defined as SYM (__cmpsf2_internal), which
| prepends an underscore on some targets, so the bare name would be
| undefined there) and the PIC call macro added for Linux PIC builds.
3819 PICCALL SYM (__cmpsf2_internal)
3820 unlk a6
3823 |=============================================================================
3824 | rounding routines
3825 |=============================================================================
3827 | The rounding routines expect the number to be normalized in registers
3828 | d0-d1, with the exponent in register d2. They assume that the
3829 | exponent is larger or equal to 1. They return a properly normalized number
3830 | if possible, and a denormalized number otherwise. The exponent is returned
3831 | in d2.
| The caller leaves its continuation address in a0; every exit is
| "jmp a0@".  NOTE(review): a "1:" label (line 3886) appears to be
| missing from this copy -- verify against upstream.
3833 Lround$to$nearest:
3834 | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
3835 | Here we assume that the exponent is not too small (this should be checked
3836 | before entering the rounding routine), but the number could be denormalized.
3838 | Check for denormalized numbers:
3839 1: btst IMM (FLT_MANT_DIG),d0
3840 bne 2f | if set the number is normalized
3841 | Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
3842 | is one (remember that a denormalized number corresponds to an
3843 | exponent of -F_BIAS+1).
3844 #ifndef __mcoldfire__
3845 cmpw IMM (1),d2 | remember that the exponent is at least one
3846 #else
3847 cmpl IMM (1),d2 | remember that the exponent is at least one
3848 #endif
3849 beq 2f | an exponent of one means denormalized
3850 addl d1,d1 | else shift and adjust the exponent
3851 addxl d0,d0 | 64-bit left shift via extend bit
3852 #ifndef __mcoldfire__
3853 dbra d2,1b |
3854 #else
3855 subql IMM (1),d2
3856 bpl 1b
3857 #endif
3859 | Now round: we do it as follows: after the shifting we can write the
3860 | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
3861 | If delta < 1, do nothing. If delta > 1, add 1 to f.
3862 | If delta == 1, we make sure the rounded number will be even (odd?)
3863 | (after shifting).
3864 btst IMM (0),d0 | is delta < 1?
3865 beq 2f | if so, do not do anything
3866 tstl d1 | low bits nonzero means delta > 1
3867 bne 1f | delta > 1: go add 1
3868 movel d0,d1 | delta == 1: round to even
3869 andl IMM (2),d1 | bit 1 is the last significant bit
3870 addl d1,d0 |
3871 bra 2f |
3872 1: movel IMM (1),d1 | else add 1
3873 addl d1,d0 |
3874 | Shift right once (because we used bit #FLT_MANT_DIG!).
3875 2: lsrl IMM (1),d0
3876 | Now check again bit #FLT_MANT_DIG (rounding could have produced a
3877 | 'fraction overflow' ...).
3878 btst IMM (FLT_MANT_DIG),d0
3879 beq 1f
3880 lsrl IMM (1),d0
3881 #ifndef __mcoldfire__
3882 addw IMM (1),d2
3883 #else
3884 addql IMM (1),d2
3885 #endif
3887 | If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
3888 | have to put the exponent to zero and return a denormalized number.
3889 btst IMM (FLT_MANT_DIG-1),d0
3890 beq 1f
3891 jmp a0@
3892 1: movel IMM (0),d2
3893 jmp a0@
3895 Lround$to$zero:
3896 Lround$to$plus:
3897 Lround$to$minus:
3898 jmp a0@ | truncation needs no fraction adjustment here
3899 #endif /* L_float */
3901 | gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
3902 | __ledf2, __ltdf2 to all return the same value as a direct call to
3903 | __cmpdf2 would. In this implementation, each of these routines
3904 | simply calls __cmpdf2. It would be more efficient to give the
3905 | __cmpdf2 routine several names, but separating them out will make it
3906 | easier to write efficient versions of these routines someday.
3907 | If the operands compare unordered, __gtdf2 and __gedf2 return -1.
3908 | The other routines return 1.
3910 #ifdef L_eqdf2
3911 .text
| __eqdf2(a, b): returns __cmpdf2_internal(a, b, 1).  The pushed 1 is
| the value reported for unordered operands (NaN), so == is false.
| NOTE(review): the "rts" after "unlk a6" appears missing in this copy.
3912 FUNC(__eqdf2)
3913 .globl SYM (__eqdf2)
3914 SYM (__eqdf2):
3915 link a6,IMM (0)
3916 pea 1 | unordered-result value (third argument)
3917 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3918 movl a6@(16),sp@-
3919 movl a6@(12),sp@-
3920 movl a6@(8),sp@-
3921 PICCALL SYM (__cmpdf2_internal)
3922 unlk a6
3924 #endif /* L_eqdf2 */
3926 #ifdef L_nedf2
3927 .text
| __nedf2(a, b): returns __cmpdf2_internal(a, b, 1); unordered yields
| 1 (nonzero), so != is true for NaN operands.
3928 FUNC(__nedf2)
3929 .globl SYM (__nedf2)
3930 SYM (__nedf2):
3931 link a6,IMM (0)
3932 pea 1 | unordered-result value (third argument)
3933 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3934 movl a6@(16),sp@-
3935 movl a6@(12),sp@-
3936 movl a6@(8),sp@-
3937 PICCALL SYM (__cmpdf2_internal)
3938 unlk a6
3940 #endif /* L_nedf2 */
3942 #ifdef L_gtdf2
3943 .text
| __gtdf2(a, b): returns __cmpdf2_internal(a, b, -1); unordered yields
| -1, so > is false for NaN operands.
3944 FUNC(__gtdf2)
3945 .globl SYM (__gtdf2)
3946 SYM (__gtdf2):
3947 link a6,IMM (0)
3948 pea -1 | unordered-result value (third argument)
3949 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3950 movl a6@(16),sp@-
3951 movl a6@(12),sp@-
3952 movl a6@(8),sp@-
3953 PICCALL SYM (__cmpdf2_internal)
3954 unlk a6
3956 #endif /* L_gtdf2 */
3958 #ifdef L_gedf2
3959 .text
| __gedf2(a, b): returns __cmpdf2_internal(a, b, -1); unordered yields
| -1, so >= is false for NaN operands.
3960 FUNC(__gedf2)
3961 .globl SYM (__gedf2)
3962 SYM (__gedf2):
3963 link a6,IMM (0)
3964 pea -1 | unordered-result value (third argument)
3965 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3966 movl a6@(16),sp@-
3967 movl a6@(12),sp@-
3968 movl a6@(8),sp@-
3969 PICCALL SYM (__cmpdf2_internal)
3970 unlk a6
3972 #endif /* L_gedf2 */
3974 #ifdef L_ltdf2
3975 .text
| __ltdf2(a, b): returns __cmpdf2_internal(a, b, 1); unordered yields
| 1, so < is false for NaN operands.
3976 FUNC(__ltdf2)
3977 .globl SYM (__ltdf2)
3978 SYM (__ltdf2):
3979 link a6,IMM (0)
3980 pea 1 | unordered-result value (third argument)
3981 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3982 movl a6@(16),sp@-
3983 movl a6@(12),sp@-
3984 movl a6@(8),sp@-
3985 PICCALL SYM (__cmpdf2_internal)
3986 unlk a6
3988 #endif /* L_ltdf2 */
3990 #ifdef L_ledf2
3991 .text
| __ledf2(a, b): returns __cmpdf2_internal(a, b, 1); unordered yields
| 1, so <= is false for NaN operands.
3992 FUNC(__ledf2)
3993 .globl SYM (__ledf2)
3994 SYM (__ledf2):
3995 link a6,IMM (0)
3996 pea 1 | unordered-result value (third argument)
3997 movl a6@(20),sp@- | re-push both doubles, rightmost word first
3998 movl a6@(16),sp@-
3999 movl a6@(12),sp@-
4000 movl a6@(8),sp@-
4001 PICCALL SYM (__cmpdf2_internal)
4002 unlk a6
4004 #endif /* L_ledf2 */
4006 | The comments above about __eqdf2, et. al., also apply to __eqsf2,
4007 | et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
4009 #ifdef L_eqsf2
4010 .text
| __eqsf2(a, b): returns __cmpsf2_internal(a, b, 1); unordered yields
| 1, so == is false for NaN operands.
4011 FUNC(__eqsf2)
4012 .globl SYM (__eqsf2)
4013 SYM (__eqsf2):
4014 link a6,IMM (0)
4015 pea 1 | unordered-result value (third argument)
4016 movl a6@(12),sp@- | re-push b, then a
4017 movl a6@(8),sp@-
4018 PICCALL SYM (__cmpsf2_internal)
4019 unlk a6
4021 #endif /* L_eqsf2 */
4023 #ifdef L_nesf2
4024 .text
| __nesf2(a, b): returns __cmpsf2_internal(a, b, 1); unordered yields
| 1 (nonzero), so != is true for NaN operands.
4025 FUNC(__nesf2)
4026 .globl SYM (__nesf2)
4027 SYM (__nesf2):
4028 link a6,IMM (0)
4029 pea 1 | unordered-result value (third argument)
4030 movl a6@(12),sp@- | re-push b, then a
4031 movl a6@(8),sp@-
4032 PICCALL SYM (__cmpsf2_internal)
4033 unlk a6
4035 #endif /* L_nesf2 */
4037 #ifdef L_gtsf2
4038 .text
| __gtsf2(a, b): returns __cmpsf2_internal(a, b, -1); unordered yields
| -1, so > is false for NaN operands.
4039 FUNC(__gtsf2)
4040 .globl SYM (__gtsf2)
4041 SYM (__gtsf2):
4042 link a6,IMM (0)
4043 pea -1 | unordered-result value (third argument)
4044 movl a6@(12),sp@- | re-push b, then a
4045 movl a6@(8),sp@-
4046 PICCALL SYM (__cmpsf2_internal)
4047 unlk a6
4049 #endif /* L_gtsf2 */
4051 #ifdef L_gesf2
4052 .text
| __gesf2(a, b): returns __cmpsf2_internal(a, b, -1); unordered yields
| -1, so >= is false for NaN operands.
4053 FUNC(__gesf2)
4054 .globl SYM (__gesf2)
4055 SYM (__gesf2):
4056 link a6,IMM (0)
4057 pea -1 | unordered-result value (third argument)
4058 movl a6@(12),sp@- | re-push b, then a
4059 movl a6@(8),sp@-
4060 PICCALL SYM (__cmpsf2_internal)
4061 unlk a6
4063 #endif /* L_gesf2 */
4065 #ifdef L_ltsf2
4066 .text
| __ltsf2(a, b): returns __cmpsf2_internal(a, b, 1); unordered yields
| 1, so < is false for NaN operands.
4067 FUNC(__ltsf2)
4068 .globl SYM (__ltsf2)
4069 SYM (__ltsf2):
4070 link a6,IMM (0)
4071 pea 1 | unordered-result value (third argument)
4072 movl a6@(12),sp@- | re-push b, then a
4073 movl a6@(8),sp@-
4074 PICCALL SYM (__cmpsf2_internal)
4075 unlk a6
4077 #endif /* L_ltsf2 */
4079 #ifdef L_lesf2
4080 .text
| __lesf2(a, b): returns __cmpsf2_internal(a, b, 1); unordered yields
| 1, so <= is false for NaN operands.
4081 FUNC(__lesf2)
4082 .globl SYM (__lesf2)
4083 SYM (__lesf2):
4084 link a6,IMM (0)
4085 pea 1 | unordered-result value (third argument)
4086 movl a6@(12),sp@- | re-push b, then a
4087 movl a6@(8),sp@-
4088 PICCALL SYM (__cmpsf2_internal)
4089 unlk a6
4091 #endif /* L_lesf2 */