| AROS source file: arch/m68k-all/libgcc1/_addsubsf3.s
| (repository history: revert between r56095 -> r55830 in arch;
|  blob c8feddb6774a83bd3c5fd27a1150fbed4ad840ff)
| single floating point add/subtract routine
|
| written by Kai-Uwe Bloem (I5110401@dbstu1.bitnet).
| Based on a 80x86 floating point packet from comp.os.minix, written by P.Housel
|
| Revision 1.3, kub 01-90 :
| added support for denormalized numbers
|
| Revision 1.2, kub 01-90 :
| replace far shifts by swaps to gain speed (more optimization is of course
| possible by doing shifts all in one instruction, but what about the rounding
| bits)
|
| Revision 1.1, kub 12-89 :
| Created single float version for 68000
|
| Revision 1.0:
| original 8088 code from P.S.Housel for double floats
|-----------------------------------------------------------------------
| float __addsf3(float u, float v)   |  u + v
| float __subsf3(float u, float v)   |  u - v   (flips v's sign, falls
|                                    |  through into __addsf3)
|
| In:   %sp@(4) = u, %sp@(8) = v  (IEEE single precision bit patterns)
| Out:  tail-jumps to norm_sf with %d0 = exponent, %d1.b = rounding
|       bits, %d2 (after swap: low word) = sign, %d4 = result mantissa;
|       %d2-%d5 stay saved on the stack for norm_sf to restore.
| NOTE(review): movem loads lowest register from lowest address, so the
|       "%d4 = v, %d5 = u" comment names the operands in the routine's
|       internal sense ("u" = larger-handled operand); addition is
|       symmetric so the result is unaffected.
|-----------------------------------------------------------------------
	.text
	.even
	.globl	__subsf3
	.globl	__addsf3

__subsf3:
	eorb	#0x80,%sp@(8)	| reverse sign of v
__addsf3:
	lea	%sp@(4),%a0	| pointer to u and v parameter
	moveml	%d2-%d5,%sp@-	| save registers
	moveml	%a0@,%d4/%d5	| %d4 = v, %d5 = u

	movel	%d5,%d0		| %d0 = u.exp
	swap	%d0
	movel	%d5,%d2		| %d2.h = u.sign
	movew	%d0,%d2
	lsrw	#7,%d0
	andw	#0xff,%d0	| kill sign bit (exponent is 8 bits)

	movel	%d4,%d1		| %d1 = v.exp
	swap	%d1
	eorw	%d1,%d2		| %d2.l = u.sign ^ v.sign
	lsrw	#7,%d1
	andw	#0xff,%d1	| kill sign bit (exponent is 8 bits)

	andl	#0x7fffff,%d5	| remove exponent from mantissa
	tstw	%d0		| check for zero exponent - no leading "1"
	beq	L_00
	orl	#0x800000,%d5	| restore implied leading "1"
	bra	L_10
L_00:	addw	#1,%d0		| "normalize" exponent (denormal: exp 0 acts as 1)
L_10:
	andl	#0x7fffff,%d4	| remove exponent from mantissa
	tstw	%d1		| check for zero exponent - no leading "1"
	beq	L_01
	orl	#0x800000,%d4	| restore implied leading "1"
	bra	L_11
L_01:	addw	#1,%d1		| "normalize" exponent (denormal: exp 0 acts as 1)
L_11:
	clrw	%d3		| (put initial zero rounding bits in %d3)
	negw	%d1		| %d1 = u.exp - v.exp
	addw	%d0,%d1
	beq	L_5		| exponents are equal - no shifting neccessary
	bgt	L_12		| not equal but no exchange neccessary
	exg	%d4,%d5		| exchange u and v
	subw	%d1,%d0		| %d0 = u.exp - (u.exp - v.exp) = v.exp
	negw	%d1
	tstw	%d2		| %d2.h = u.sign ^ (u.sign ^ v.sign) = v.sign
	bpl	L_12
	bchg	#31,%d2
L_12:
	cmpw	#24,%d1		| is u so much bigger that v is not
	bge	L_7		| significant ?

	movew	#7-1,%d3	| shift u left up to 7 bits to minimize loss
L_2:
	addl	%d5,%d5
	subw	#1,%d0		| decrement exponent
	subw	#1,%d1		| done shifting altogether ?
	dbeq	%d3,L_2		| loop if still can shift u.mant more
	clrw	%d3

	cmpw	#16,%d1		| see if fast rotate possible
	blt	L_4
	orb	%d4,%d3		| set rounding bits
	orb	%d2,%d3
	sne	%d2		| "sticky byte"
	movew	%d4,%d3
	lsrw	#8,%d3
	clrw	%d4		| rotate by swapping register halfs
	swap	%d4
	subw	#16,%d1
L_02:
	lsrl	#1,%d4		| shift v.mant right the rest of the way
	orb	%d3,%d2		| set "sticky byte" if necessary
	roxrw	#1,%d3		| shift into rounding bits
L_4:	dbra	%d1,L_02	| loop
	andb	#1,%d2		| see if "sticky bit" should be set
	orb	%d2,%d3
L_5:
	tstw	%d2		| are the signs equal ?
	bpl	L_6		| yes, no negate necessary

	negb	%d3		| negate rounding bits and v.mant
	negl	%d4
L_6:
	addl	%d4,%d5		| u.mant = u.mant + v.mant
	bcs	L_7		| needn't negate
	tstw	%d2		| opposite signs ?
	bpl	L_7		| don't need to negate result

	negb	%d3		| negate rounding bits and u.mant
	negl	%d5
	notl	%d2		| switch sign
L_7:
	movel	%d5,%d4		| move result for normalization
	moveb	%d3,%d1		| put rounding bits in %d1 for norm_sf
	swap	%d2		| put sign into %d2 (exponent is in %d0)
	jmp	norm_sf		| leave registers on stack for norm_sf