2 * Signed multiply, from Appendix E of the Sparc Version 8
7 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
10 * This code optimizes short (less than 13-bit) multiplies.
! Entry/dispatch: stash the multiplier in the Y register (mulscc reads
! one multiplier bit from Y per step), then test whether %o0 fits in
! 13 bits.  If bits 12..31 are all zero we only need 12 multiply steps.
! The andcc in the delay slot both zeroes the partial product %o4 and
! clears N and V, which mulscc requires before the first step.
15 mov %o0, %y ! multiplier -> Y
16 andncc %o0, 0xfff, %g0 ! test bits 12..31
17 be Lmul_shortway ! if zero, can do it the short way
18 andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
21 * Long multiply. 32 steps, followed by a final shift step.
! One mulscc per multiplier bit: each step conditionally adds the
! multiplicand %o1 into the partial product %o4 (depending on the next
! bit of Y) and shifts the 64-bit pair %o4:Y right by one.  After 32
! steps the product is skewed one position; the extra step with %g0
! performs the final shift without adding anything.
23 mulscc %o4, %o1, %o4 ! 1
24 mulscc %o4, %o1, %o4 ! 2
25 mulscc %o4, %o1, %o4 ! 3
26 mulscc %o4, %o1, %o4 ! 4
27 mulscc %o4, %o1, %o4 ! 5
28 mulscc %o4, %o1, %o4 ! 6
29 mulscc %o4, %o1, %o4 ! 7
30 mulscc %o4, %o1, %o4 ! 8
31 mulscc %o4, %o1, %o4 ! 9
32 mulscc %o4, %o1, %o4 ! 10
33 mulscc %o4, %o1, %o4 ! 11
34 mulscc %o4, %o1, %o4 ! 12
35 mulscc %o4, %o1, %o4 ! 13
36 mulscc %o4, %o1, %o4 ! 14
37 mulscc %o4, %o1, %o4 ! 15
38 mulscc %o4, %o1, %o4 ! 16
39 mulscc %o4, %o1, %o4 ! 17
40 mulscc %o4, %o1, %o4 ! 18
41 mulscc %o4, %o1, %o4 ! 19
42 mulscc %o4, %o1, %o4 ! 20
43 mulscc %o4, %o1, %o4 ! 21
44 mulscc %o4, %o1, %o4 ! 22
45 mulscc %o4, %o1, %o4 ! 23
46 mulscc %o4, %o1, %o4 ! 24
47 mulscc %o4, %o1, %o4 ! 25
48 mulscc %o4, %o1, %o4 ! 26
49 mulscc %o4, %o1, %o4 ! 27
50 mulscc %o4, %o1, %o4 ! 28
51 mulscc %o4, %o1, %o4 ! 29
52 mulscc %o4, %o1, %o4 ! 30
53 mulscc %o4, %o1, %o4 ! 31
54 mulscc %o4, %o1, %o4 ! 32
55 mulscc %o4, %g0, %o4 ! final shift
57 ! If %o0 was negative, the result is
58 ! (%o0 * %o1) + (%o1 << 32)
66 ! %o0 was indeed negative; fix upper 32 bits of result by subtracting
67 ! %o1 (i.e., return %o4 - %o1 in %o1).
75 /* Faster code adapted from tege@sics.se's code for umul.S. */
! The step loop treats the multiplier as unsigned, so (per the comment
! above) a negative %o0 leaves the raw result high by (%o1 << 32).
! Build an all-ones/all-zeros mask from %o0's sign bit and subtract
! either %o1 or 0 from the high word -- branch-free compensation.
! NOTE(review): the canonical routine returns via a retl whose delay
! slot holds the sub below; that retl is not visible in this chunk --
! confirm against the full source.
76 sra %o0, 31, %o2 ! make mask from sign bit
77 and %o1, %o2, %o2 ! %o2 = 0 or %o1, depending on sign of %o0
78 rd %y, %o0 ! get lower half of product
80 sub %o4, %o2, %o1 ! subtract compensation
81 ! and put upper half in place
86 * Short multiply. 12 steps, followed by a final shift step.
87 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
88 * but there is no problem with %o0 being negative (unlike above).
! Short path: the multiplier has at most 12 significant bits (bits
! 12..31 tested zero at dispatch), so 12 mulscc steps plus the
! final-shift step are enough.  Sign compensation is unnecessary here,
! as noted in the section comment above.
90 mulscc %o4, %o1, %o4 ! 1
91 mulscc %o4, %o1, %o4 ! 2
92 mulscc %o4, %o1, %o4 ! 3
93 mulscc %o4, %o1, %o4 ! 4
94 mulscc %o4, %o1, %o4 ! 5
95 mulscc %o4, %o1, %o4 ! 6
96 mulscc %o4, %o1, %o4 ! 7
97 mulscc %o4, %o1, %o4 ! 8
98 mulscc %o4, %o1, %o4 ! 9
99 mulscc %o4, %o1, %o4 ! 10
100 mulscc %o4, %o1, %o4 ! 11
101 mulscc %o4, %o1, %o4 ! 12
102 mulscc %o4, %g0, %o4 ! final shift
105 * %o4 has 20 of the bits that should be in the low part of the
106 * result; %y has the bottom 12 (as %y's top 12). That is:
109 * +----------------+----------------+
110 * | -12- | -20- | -12- | -20- |
111 * +------(---------+------)---------+
112 * --hi-- ----low-part----
114 * The upper 12 bits of %o4 should be sign-extended to form the
115 * high part of the product (i.e., highpart = %o4 >> 20).
! Recombine per the diagram: low word = (%o4 << 12) | (%o5 >> 20),
! high word = %o4 arithmetically shifted right by 20.
! NOTE(review): %o5 is presumably loaded from Y ("rd %y, %o5") on a
! line not visible in this chunk -- confirm against the full source.
119 sll %o4, 12, %o0 ! shift middle bits left 12
120 srl %o5, 20, %o5 ! shift low bits right 20, zero fill at left
121 or %o5, %o0, %o0 ! construct low part of result
123 sra %o4, 20, %o1 ! ... and extract high part of result