arch/m68k-all/utility/smult64.s
/*
    Copyright © 1995-2001, The AROS Development Team. All rights reserved.
    $Id$

    Desc: Utility 64 bit multiplication routines. m68k version.
    Lang: english
*/
/*  SMult64()/UMult64():
    These are the signed/unsigned 64-bit multiplication routines.
    There are two entry points here because, as of the 060, the
    32*32->64-bit result instructions are no longer supported in
    hardware, and I haven't quite figured out how to do this using
    the 32-bit ops yet (can't be that hard though).

    Still, emulating is faster than taking an unsupported integer
    instruction exception.
*/
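/*  In C terms, something like this (a sketch only; the real AROS
    prototypes live in the utility.library headers, not here):

        QUAD  SMult64(LONG a, LONG b);     signed   32x32 -> 64
        UQUAD UMult64(ULONG a, ULONG b);   unsigned 32x32 -> 64

    As the code below shows, the factors arrive in %d0/%d1 and the
    64-bit product comes back in %d0:%d1, high long in %d0. */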
#include "aros/m68k/asm.h"

        .text
        .balign 4

        .globl  AROS_SLIB_ENTRY(SMult64,Utility,33)
        .globl  AROS_SLIB_ENTRY(SMult64_020,Utility,33)

        /* Required by SMult64() */
        .globl  AROS_SLIB_ENTRY(UMult64,Utility,34)

        .type   AROS_SLIB_ENTRY(SMult64,Utility,33),@function
        .type   AROS_SLIB_ENTRY(SMult64_020,Utility,33),@function
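/*  On the 020 and up, muls.l <ea>,Dh:Dl places the full 64-bit
    signed product of Dl and <ea> in Dh:Dl. The single instruction
    below therefore returns the high long in %d0 and the low long
    in %d1. */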
AROS_SLIB_ENTRY(SMult64_020,Utility,33):
        muls.l  %d0,%d0:%d1
        rts
/*  How do I do this? Again, consider:
        (a*2^16 + b) * (c*2^16 + d)
        = a*c*2^32 + (a*d + b*c)*2^16 + b*d

    I tried to think of a way of doing this with the mulu.l
    instruction, but I couldn't, so I'll just use mulu.w. It's
    quicker than an unsupported integer instruction exception
    anyway :)
*/
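/*  A minimal sketch of that mulu.w decomposition, for illustration
    only: the real UMult64 lives in its own file and its register
    usage may well differ. Entry: %d0 = x, %d1 = y; exit: %d0:%d1 =
    x*y unsigned. %d2-%d5 would have to be preserved around this.

        move.l  %d0,%d2         | d2 = x
        move.l  %d1,%d3         | d3 = y
        swap    %d2             | d2.w = x.hi
        swap    %d3             | d3.w = y.hi
        move.w  %d2,%d4
        mulu.w  %d1,%d4         | d4 = x.hi * y.lo
        move.w  %d3,%d5
        mulu.w  %d0,%d5         | d5 = x.lo * y.hi
        mulu.w  %d1,%d0         | d0 = x.lo * y.lo
        mulu.w  %d3,%d2         | d2 = x.hi * y.hi
        add.l   %d5,%d4         | middle term, may carry out
        bcc.s   1f
        add.l   #0x10000,%d2    | that carry is worth 2^48
1:      moveq   #0,%d1
        move.w  %d4,%d1
        swap    %d1             | d1 = middle.lo << 16
        clr.w   %d4
        swap    %d4             | d4 = middle >> 16
        add.l   %d1,%d0         | low long, sets X on carry out
        addx.l  %d4,%d2         | high long picks up that carry
        move.l  %d0,%d1         | return low long in %d1 ...
        move.l  %d2,%d0         | ... and high long in %d0
*/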
/* Have to change the sign... */
        .balign 4
AROS_SLIB_ENTRY(SMult64,Utility,33):
        move.l  %d2,-(%sp)
        moveq   #0,%d2          /* %d2 tracks whether the sign changed */
        tst.l   %d0
        bpl.s   .Lispos1
        neg.l   %d0             /* make the first factor positive */
        addq.l  #1,%d2
.Lispos1:
        tst.l   %d1
        bpl.s   .Lispos2
        neg.l   %d1             /* make the second factor positive */
        subq.l  #1,%d2
/* Ok, so if %d2 != 0, then the sign was changed and the product
   must be negated on the way out */
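/*  For example, 3 * -5 leaves %d2 = -1, so the unsigned product 15
    gets negated to -15 below; -3 * -5 leaves %d2 = 1 - 1 = 0, so
    the already-positive product is returned as is. */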
.Lispos2:
        jsr     AROS_SLIB_ENTRY(UMult64,Utility,34)
        tst.l   %d2
        beq.s   .Lispos
        moveq   #0,%d2          /* zero source for the addx below */
        /* Ok, we have to change the sign: 2's comp = 1's comp + 1 */
        not.l   %d0
        not.l   %d1

        /* Add 1 to the low order 32 bits */
        addq.l  #1,%d1

        /* Add 0 and the carry to the high 32 bits */
        addx.l  %d2,%d0
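/*  e.g. 15 = 0x00000000:0000000F inverts to 0xFFFFFFFF:FFFFFFF0,
    and the +1 gives 0xFFFFFFFF:FFFFFFF1 = -15. The addq only
    carries when the inverted low long was 0xFFFFFFFF, and the addx
    then propagates that carry into the high long through X. */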
.Lispos:
        move.l  (%sp)+,%d2
        rts