2 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
7 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product, %o0 the lower 32 bits).
10 * This code optimizes short (less than 13-bit) multiplies.
! Entry sequence (excerpt: the entry label / section directives are
! outside this view).  Dispatch between the 32-step long path and the
! 12-step short path based on the magnitude of the multiplier in %o0.
17 mov %o0, %y ! multiplier -> Y
18 andncc %o0, 0xfff, %g0 ! test bits 12..31
19 be LOC(mul_shortway) ! if zero, can do it the short way
! Branch delay slot: executes on both paths.  Using andcc (rather than
! a plain mov) also clears the N and V condition codes, which mulscc
! shifts into the partial product, so they must start out clean.
20 andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
23 * Long multiply. 32 steps, followed by a final shift step.
! Each mulscc step inspects the low bit of %y, conditionally adds the
! multiplicand (%o1) into the accumulator (%o4), and shifts the 64-bit
! pair %o4:%y right one bit; 32 steps consume all 32 multiplier bits.
25 mulscc %o4, %o1, %o4 ! 1
26 mulscc %o4, %o1, %o4 ! 2
27 mulscc %o4, %o1, %o4 ! 3
28 mulscc %o4, %o1, %o4 ! 4
29 mulscc %o4, %o1, %o4 ! 5
30 mulscc %o4, %o1, %o4 ! 6
31 mulscc %o4, %o1, %o4 ! 7
32 mulscc %o4, %o1, %o4 ! 8
33 mulscc %o4, %o1, %o4 ! 9
34 mulscc %o4, %o1, %o4 ! 10
35 mulscc %o4, %o1, %o4 ! 11
36 mulscc %o4, %o1, %o4 ! 12
37 mulscc %o4, %o1, %o4 ! 13
38 mulscc %o4, %o1, %o4 ! 14
39 mulscc %o4, %o1, %o4 ! 15
40 mulscc %o4, %o1, %o4 ! 16
41 mulscc %o4, %o1, %o4 ! 17
42 mulscc %o4, %o1, %o4 ! 18
43 mulscc %o4, %o1, %o4 ! 19
44 mulscc %o4, %o1, %o4 ! 20
45 mulscc %o4, %o1, %o4 ! 21
46 mulscc %o4, %o1, %o4 ! 22
47 mulscc %o4, %o1, %o4 ! 23
48 mulscc %o4, %o1, %o4 ! 24
49 mulscc %o4, %o1, %o4 ! 25
50 mulscc %o4, %o1, %o4 ! 26
51 mulscc %o4, %o1, %o4 ! 27
52 mulscc %o4, %o1, %o4 ! 28
53 mulscc %o4, %o1, %o4 ! 29
54 mulscc %o4, %o1, %o4 ! 30
55 mulscc %o4, %o1, %o4 ! 31
56 mulscc %o4, %o1, %o4 ! 32
! One more step with %g0 performs the final alignment shift without
! adding anything.
57 mulscc %o4, %g0, %o4 ! final shift
59 ! If %o0 was negative, the result computed so far is
60 ! (%o0 * %o1) + (%o1 << 32)
! (mulscc effectively treats the value placed in %y as unsigned, and
! unsigned(%o0) = %o0 + 2^32 when %o0 < 0).
68 ! %o0 was indeed negative; fix upper 32 bits of result by subtracting
69 ! %o1 (i.e., return %o4 - %o1 in %o1).
77 /* Faster code adapted from tege@sics.se's code for umul.S. */
! Branch-free compensation: build an all-ones mask from %o0's sign
! bit, AND it with %o1 so %o2 is 0 or %o1, and subtract that from the
! high word.
78 sra %o0, 31, %o2 ! make mask from sign bit
79 and %o1, %o2, %o2 ! %o2 = 0 or %o1, depending on sign of %o0
80 rd %y, %o0 ! get lower half of product
! NOTE(review): in the full source a return (retl) presumably sits
! between the rd and the sub, with the sub in its delay slot -- not
! visible in this excerpt; confirm before relying on fall-through.
82 sub %o4, %o2, %o1 ! subtract compensation
83 ! and put upper half in place
88 * Short multiply. 12 steps, followed by a final shift step.
89 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
90 * but there is no problem with %o0 being negative (unlike above).
! Only the low 12 multiplier bits can be nonzero here (the dispatch
! above branched here only when bits 12..31 of %o0 were all zero), so
! 12 mulscc steps suffice; the %g0 step supplies the final alignment
! shift without an add.
92 mulscc %o4, %o1, %o4 ! 1
93 mulscc %o4, %o1, %o4 ! 2
94 mulscc %o4, %o1, %o4 ! 3
95 mulscc %o4, %o1, %o4 ! 4
96 mulscc %o4, %o1, %o4 ! 5
97 mulscc %o4, %o1, %o4 ! 6
98 mulscc %o4, %o1, %o4 ! 7
99 mulscc %o4, %o1, %o4 ! 8
100 mulscc %o4, %o1, %o4 ! 9
101 mulscc %o4, %o1, %o4 ! 10
102 mulscc %o4, %o1, %o4 ! 11
103 mulscc %o4, %o1, %o4 ! 12
104 mulscc %o4, %g0, %o4 ! final shift
107 * %o4 has 20 of the bits that should be in the low part of the
108 * result; %y has the bottom 12 (as %y's top 12). That is:
111 * +----------------+----------------+
112 * | -12- | -20- | -12- | -20- |
113 * +------(---------+------)---------+
114 * --hi-- ----low-part----
116 * The upper 12 bits of %o4 should be sign-extended to form the
117 * high part of the product (i.e., highpart = %o4 >> 20).
! NOTE(review): %o5 is presumably loaded from %y (rd %y, %o5) just
! before these shifts; that instruction is not visible in this
! excerpt -- confirm against the full source.
! Stitch the low word together from the middle 20 bits (in %o4) and
! the bottom 12 bits (top of %o5).
121 sll %o4, 12, %o0 ! shift middle bits left 12
122 srl %o5, 20, %o5 ! shift low bits right 20, zero fill at left
123 or %o5, %o0, %o0 ! construct low part of result
! Result: %o0 = low 32 bits, %o1 = sign-extended high 32 bits.
125 sra %o4, 20, %o1 ! ... and extract high part of result