/* __mpn_divmod_1(quot_ptr, dividend_ptr, dividend_size, divisor_limb) --
   Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
   Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
   Return the single-limb remainder.
   There are no constraints on the value of the divisor.

   QUOT_PTR and DIVIDEND_PTR might point to the same limb.

Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
License for more details.

You should have received a copy of the GNU Library General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
36 #define UDIV_TIME UMUL_TIME
39 /* FIXME: We should be using invert_limb (or invert_normalized_limb)
40 here (not udiv_qrnnd). */
44 __mpn_divmod_1 (mp_ptr quot_ptr
,
45 mp_srcptr dividend_ptr
, mp_size_t dividend_size
,
48 __mpn_divmod_1 (quot_ptr
, dividend_ptr
, dividend_size
, divisor_limb
)
50 mp_srcptr dividend_ptr
;
51 mp_size_t dividend_size
;
59 /* ??? Should this be handled at all? Rely on callers? */
60 if (dividend_size
== 0)
63 /* If multiplication is much faster than division, and the
64 dividend is large, pre-invert the divisor, and use
65 only multiplications in the inner loop. */
67 /* This test should be read:
68 Does it ever help to use udiv_qrnnd_preinv?
69 && Does what we save compensate for the inversion overhead? */
70 if (UDIV_TIME
> (2 * UMUL_TIME
+ 6)
71 && (UDIV_TIME
- (2 * UMUL_TIME
+ 6)) * dividend_size
> UDIV_TIME
)
73 int normalization_steps
;
75 count_leading_zeros (normalization_steps
, divisor_limb
);
76 if (normalization_steps
!= 0)
78 mp_limb divisor_limb_inverted
;
80 divisor_limb
<<= normalization_steps
;
82 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
83 result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
84 most significant bit (with weight 2**N) implicit. */
86 #if 0 /* This can't happen when normalization_steps != 0 */
87 /* Special case for DIVISOR_LIMB == 100...000. */
88 if (divisor_limb
<< 1 == 0)
89 divisor_limb_inverted
= ~(mp_limb
) 0;
92 udiv_qrnnd (divisor_limb_inverted
, dummy
,
93 -divisor_limb
, 0, divisor_limb
);
95 n1
= dividend_ptr
[dividend_size
- 1];
96 r
= n1
>> (BITS_PER_MP_LIMB
- normalization_steps
);
98 /* Possible optimization:
100 && divisor_limb > ((n1 << normalization_steps)
101 | (dividend_ptr[dividend_size - 2] >> ...)))
102 ...one division less... */
104 for (i
= dividend_size
- 2; i
>= 0; i
--)
106 n0
= dividend_ptr
[i
];
107 udiv_qrnnd_preinv (quot_ptr
[i
+ 1], r
, r
,
108 ((n1
<< normalization_steps
)
109 | (n0
>> (BITS_PER_MP_LIMB
- normalization_steps
))),
110 divisor_limb
, divisor_limb_inverted
);
113 udiv_qrnnd_preinv (quot_ptr
[0], r
, r
,
114 n1
<< normalization_steps
,
115 divisor_limb
, divisor_limb_inverted
);
116 return r
>> normalization_steps
;
120 mp_limb divisor_limb_inverted
;
122 /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
123 result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
124 most significant bit (with weight 2**N) implicit. */
126 /* Special case for DIVISOR_LIMB == 100...000. */
127 if (divisor_limb
<< 1 == 0)
128 divisor_limb_inverted
= ~(mp_limb
) 0;
130 udiv_qrnnd (divisor_limb_inverted
, dummy
,
131 -divisor_limb
, 0, divisor_limb
);
133 i
= dividend_size
- 1;
136 if (r
>= divisor_limb
)
146 n0
= dividend_ptr
[i
];
147 udiv_qrnnd_preinv (quot_ptr
[i
], r
, r
,
148 n0
, divisor_limb
, divisor_limb_inverted
);
155 if (UDIV_NEEDS_NORMALIZATION
)
157 int normalization_steps
;
159 count_leading_zeros (normalization_steps
, divisor_limb
);
160 if (normalization_steps
!= 0)
162 divisor_limb
<<= normalization_steps
;
164 n1
= dividend_ptr
[dividend_size
- 1];
165 r
= n1
>> (BITS_PER_MP_LIMB
- normalization_steps
);
167 /* Possible optimization:
169 && divisor_limb > ((n1 << normalization_steps)
170 | (dividend_ptr[dividend_size - 2] >> ...)))
171 ...one division less... */
173 for (i
= dividend_size
- 2; i
>= 0; i
--)
175 n0
= dividend_ptr
[i
];
176 udiv_qrnnd (quot_ptr
[i
+ 1], r
, r
,
177 ((n1
<< normalization_steps
)
178 | (n0
>> (BITS_PER_MP_LIMB
- normalization_steps
))),
182 udiv_qrnnd (quot_ptr
[0], r
, r
,
183 n1
<< normalization_steps
,
185 return r
>> normalization_steps
;
188 /* No normalization needed, either because udiv_qrnnd doesn't require
189 it, or because DIVISOR_LIMB is already normalized. */
191 i
= dividend_size
- 1;
194 if (r
>= divisor_limb
)
204 n0
= dividend_ptr
[i
];
205 udiv_qrnnd (quot_ptr
[i
], r
, r
, n0
, divisor_limb
);