1 /* mpz_powm(res,base,exp,mod) -- Set R to (U^E) mod M.
3 Contributed to the GNU project by Torbjorn Granlund.
5 Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2002, 2005, 2008, 2009
6 Free Software Foundation, Inc.
8 This file is part of the GNU MP Library.
10 The GNU MP Library is free software; you can redistribute it and/or modify
11 it under the terms of the GNU Lesser General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or (at your
13 option) any later version.
15 The GNU MP Library is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
18 License for more details.
20 You should have received a copy of the GNU Lesser General Public License
21 along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
34 * Improve handling of buffers. It is pretty ugly now.
36 * For even moduli, we compute a binvert of its odd part both here and in
37 mpn_powm. How can we avoid this recomputation?
/* NOTE(review): this chunk is a fragmented extraction of GMP's mpz/powm.c.
   Statements are split across lines and many intermediate lines of the
   original file are missing; the leading numbers on code lines are the
   original file's line numbers.  Code tokens are left byte-identical here;
   only comments have been added.  Claims below that depend on lines not
   visible in this fragment are marked as assumptions to confirm against
   the full source.  */
52 #define HANDLE_NEGATIVE_EXPONENT 1
/* mpz_powm: set R to (B^E) mod M (per the file's top-of-file comment).
   The alternate `pow' entry point below, selected when BERKELEY_MP is
   defined, takes the same operands with the result parameter last.  */
56 mpz_powm (mpz_ptr r
	, mpz_srcptr b
	, mpz_srcptr e
	, mpz_srcptr m
	)
57 #else /* BERKELEY_MP */
58 pow (mpz_srcptr b
	, mpz_srcptr e
	, mpz_srcptr m
	, mpz_ptr r
	)
59 #endif /* BERKELEY_MP */
/* n: limb count of M.  nodd / ncnt: limb counts used for M's odd part and
   its 2^cnt part respectively -- presumed from their use with mpn_rshift /
   count_trailing_zeros below; TODO confirm against the missing lines.  */
61 mp_size_t n
	, nodd
	, ncnt
	;
/* rn: result limb count; bn: limb count of B; es/en: sign/size and limb
   count of E; itch: scratch-space size in limbs for the mpn calls.  */
65 mp_size_t rn
	, bn
	, es
	, en
	, itch
	;
/* Non-positive exponent: E = 0 is answered directly; E < 0 is handled
   (under HANDLE_NEGATIVE_EXPONENT) by inverting B modulo M below.  */
77 if (UNLIKELY (es
	<= 0))
82 /* b^0 mod m, b is anything and m is non-zero.
83 Result is 1 mod m, i.e., 1 or 0 depending on if m = 1. */
84 SIZ(r
	) = n
	!= 1 || mp
	[0] != 1;
86 TMP_FREE
	; /* we haven't really allocated anything here */
89 #if HANDLE_NEGATIVE_EXPONENT
/* Negative exponent: replace B by B^-1 mod M; mpz_invert failing means
   no inverse exists (B not coprime to M).  */
90 MPZ_TMP_INIT (new_b
	, n
	+ 1);
92 if (! mpz_invert (new_b
	, b
	, m
	))
/* bn == 0 means B is zero -- presumably 0^E mod M is answered directly
   here; the handling lines are not visible in this fragment.  */
104 if (UNLIKELY (bn
	== 0))
113 /* Handle (b^1 mod m) early, since mpn_pow* do not handle that case. */
114 if (UNLIKELY (en
	== 1 && ep
	[0] == 1))
116 rp
	= TMP_ALLOC_LIMBS (n
	);
/* B has at least as many limbs as M (branch condition not visible):
   reduce B mod M with a full division; the remainder lands in rp.  */
120 mp_ptr qp
	= TMP_ALLOC_LIMBS (bn
	- n
	+ 1);
121 mpn_tdiv_qr (qp
	, rp
	, 0L, bp
	, bn
	, mp
	, n
	);
123 MPN_NORMALIZE (rp
	, rn
	);
/* Negative B with a nonzero residue: fold the sign into the residue by
   taking M - rp, so the stored result is the canonical non-negative one.  */
125 if (SIZ(b
	) < 0 && rn
	!= 0)
127 mpn_sub (rp
	, mp
	, n
	, rp
	, rn
	);
129 MPN_NORMALIZE (rp
	, rn
	);
/* (Alternate b^1 sub-case, preceding condition not visible): rp = M - B,
   then trim a possible high zero limb.  */
136 mpn_sub (rp
	, mp
	, n
	, bp
	, bn
	);
138 rn
	-= (rp
	[rn
	- 1] == 0);
/* Remaining b^1 sub-case: B already reduced and non-negative, just copy.  */
142 MPN_COPY (rp
	, bp
	, bn
	);
149 /* Remove low zero limbs from M. This loop will terminate for correctly
150 represented mpz numbers. */
152 while (UNLIKELY (mp
	[0] == 0))
/* Even M: split off the odd part by shifting out the cnt trailing zero
   bits of the low limb; nodd shrinks if the top limb becomes zero.  */
161 mp_ptr
	new = TMP_ALLOC_LIMBS (nodd
	);
162 count_trailing_zeros (cnt
	, mp
	[0]);
163 mpn_rshift (new, mp
	, nodd
	, cnt
	);
164 nodd
	-= new[nodd
	- 1] == 0;
171 /* We will call both mpn_powm and mpn_powlo. */
172 /* rp needs n, mpn_powlo needs 4n, the 2 mpn_binvert might need more */
173 mp_size_t n_largest_binvert
	= MAX (ncnt
	, nodd
	);
174 mp_size_t itch_binvert
	= mpn_binvert_itch (n_largest_binvert
	);
175 itch
	= 3 * n
	+ MAX (itch_binvert
	, 2 * n
	);
179 /* We will call just mpn_powm. */
180 mp_size_t itch_binvert
	= mpn_binvert_itch (nodd
	);
181 itch
	= n
	+ MAX (itch_binvert
	, 2 * n
	);
183 tp
	= TMP_ALLOC_LIMBS (itch
	);
/* Main work: rp = B^E mod (odd part of M), done at the mpn layer.  */
188 mpn_powm (rp
	, bp
	, bn
	, ep
	, en
	, mp
	, nodd
	, tp
	);
/* Even-modulus recombination: also compute B^E mod 2^cnt and combine the
   two residues (a CRT-style merge -- confirm against the missing lines).  */
194 mp_ptr r2
	, xp
	, yp
	, odd_inv_2exp
	;
/* Zero-pad a copy of B out to ncnt limbs for the mod-2^cnt power.  */
200 mp_ptr
	new = TMP_ALLOC_LIMBS (ncnt
	);
201 MPN_COPY (new, bp
	, bn
	);
202 MPN_ZERO (new + bn
	, ncnt
	- bn
	);
/* t = number of bits in the 2^cnt modulus part, counted from ncnt limbs
   minus the partial top limb adjustment.  */
217 t
	= (ncnt
	- (cnt
	!= 0)) * GMP_NUMB_BITS
	+ cnt
	;
219 /* Count number of low zero bits in B, up to 3. */
220 bcnt
	= (0x1213 >> ((bp
	[0] & 7) << 1)) & 0x3;
221 /* Note that ep[0] * bcnt might overflow, but that just results
222 in a missed optimization. */
/* If B's low zero bits times the exponent cover all t bits, B^E mod 2^t
   is zero and the powlo call can presumably be skipped -- TODO confirm.  */
223 if (ep
	[0] * bcnt
	>= t
	)
/* r2 = B^E mod 2^(ncnt*GMP_NUMB_BITS), truncated low product power.  */
230 mpn_powlo (r2
	, bp
	, ep
	, en
	, ncnt
	, tp
	+ ncnt
	);
/* Zero-pad a copy of the odd part of M out to ncnt limbs.  */
235 mp_ptr
	new = TMP_ALLOC_LIMBS (ncnt
	);
236 MPN_COPY (new, mp
	, nodd
	);
237 MPN_ZERO (new + nodd
	, ncnt
	- nodd
	);
/* odd_inv_2exp = (odd part of M)^-1 mod 2^(ncnt*GMP_NUMB_BITS), via the
   binary inverse; scratch starts at tp + 2n.  */
241 odd_inv_2exp
	= tp
	+ n
	;
242 mpn_binvert (odd_inv_2exp
	, mp
	, ncnt
	, tp
	+ 2 * n
	);
/* r2 -= rp (difference of the two residues, clamped to ncnt limbs).  */
244 mpn_sub (r2
	, r2
	, ncnt
	, rp
	, nodd
	> ncnt
	? ncnt
	: nodd
	);
/* xp = odd_inv_2exp * r2 mod 2^(ncnt limbs), then mask to cnt bits in the
   top limb so xp < 2^cnt.  */
247 mpn_mullo_n (xp
	, odd_inv_2exp
	, r2
	, ncnt
	);
250 xp
	[ncnt
	- 1] &= (CNST_LIMB(1) << cnt
	) - 1;
/* yp = xp * modd -- the two mpn_mul calls are the two operand orders
   (mpn_mul presumably requires the longer operand first; the selecting
   condition is not visible here).  */
254 mpn_mul (yp
	, xp
	, ncnt
	, mp
	, nodd
	);
256 mpn_mul (yp
	, mp
	, nodd
	, xp
	, ncnt
	);
/* rp += yp: combined result, congruent to B^E mod both parts of M.  */
258 mpn_add (rp
	, yp
	, n
	, rp
	, nodd
	);
260 ASSERT (nodd
	+ ncnt
	>= n
	);
261 ASSERT (nodd
	+ ncnt
	<= n
	+ 1);
264 MPN_NORMALIZE (rp
	, rn
	);
/* Negative B with an odd exponent yields a negative power; canonicalize a
   nonzero residue as M - rp.  */
266 if ((ep
	[0] & 1) && SIZ(b
	) < 0 && rn
	!= 0)
268 mpn_sub (rp
	, PTR(m
	), n
	, rp
	, rn
	);
270 MPN_NORMALIZE (rp
	, rn
	);
/* Copy the rn result limbs into R (R's size/sign assignment follows in
   lines past the end of this fragment).  */
276 MPN_COPY (PTR(r
	), rp
	, rn
	);