[luatex.git] / source / libs / gmp / gmp-src / gmp-impl.h
1 /* Include file for internal GNU MP types and definitions.
3 THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO
4 BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.
6 Copyright 1991, 1993-1997, 1999-2015 Free Software Foundation, Inc.
8 This file is part of the GNU MP Library.
10 The GNU MP Library is free software; you can redistribute it and/or modify
11 it under the terms of either:
13 * the GNU Lesser General Public License as published by the Free
14 Software Foundation; either version 3 of the License, or (at your
15 option) any later version.
19 * the GNU General Public License as published by the Free Software
20 Foundation; either version 2 of the License, or (at your option) any
21 later version.
23 or both in parallel, as here.
25 The GNU MP Library is distributed in the hope that it will be useful, but
26 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
27 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
28 for more details.
30 You should have received copies of the GNU General Public License and the
31 GNU Lesser General Public License along with the GNU MP Library. If not,
32 see https://www.gnu.org/licenses/. */
35 /* __GMP_DECLSPEC must be given on any global data that will be accessed
36 from outside libgmp, meaning from the test or development programs, or
37 from libgmpxx. Failing to do this will result in an incorrect address
38 being used for the accesses. On functions __GMP_DECLSPEC makes calls
39 from outside libgmp more efficient, but they'll still work fine without
40 it. */
43 #ifndef __GMP_IMPL_H__
44 #define __GMP_IMPL_H__
46 #if defined _CRAY
47 #include <intrinsics.h> /* for _popcnt */
48 #endif
50 /* For INT_MAX, etc. We used to avoid it because of a bug (on solaris,
51 gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong
52 values (the ABI=64 values)), but it should be safe now.
54 On Cray vector systems, however, we need the system limits.h since sizes
55 of signed and unsigned types can differ there, depending on compiler
56 options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail. For
57 reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
58 short can be 24, 32, 46 or 64 bits, and different for ushort. */
60 #include <limits.h>
62 /* For fat.h and other fat binary stuff.
63 No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
64 declared this way are only used to set function pointers in __gmpn_cpuvec,
65 they're not called directly. */
66 #define DECL_add_n(name) \
67 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
68 #define DECL_addlsh1_n(name) \
69 DECL_add_n (name)
70 #define DECL_addlsh2_n(name) \
71 DECL_add_n (name)
72 #define DECL_addmul_1(name) \
73 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
74 #define DECL_addmul_2(name) \
75 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
76 #define DECL_bdiv_dbm1c(name) \
77 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
78 #define DECL_cnd_add_n(name) \
79 __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
80 #define DECL_cnd_sub_n(name) \
81 __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
82 #define DECL_com(name) \
83 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
84 #define DECL_copyd(name) \
85 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
86 #define DECL_copyi(name) \
87 DECL_copyd (name)
88 #define DECL_divexact_1(name) \
89 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
90 #define DECL_divexact_by3c(name) \
91 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
92 #define DECL_divrem_1(name) \
93 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t)
94 #define DECL_gcd_1(name) \
95 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t)
96 #define DECL_lshift(name) \
97 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, unsigned)
98 #define DECL_lshiftc(name) \
99 DECL_lshift (name)
100 #define DECL_mod_1(name) \
101 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t)
102 #define DECL_mod_1_1p(name) \
103 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [])
104 #define DECL_mod_1_1p_cps(name) \
105 __GMP_DECLSPEC void name (mp_limb_t cps[], mp_limb_t b)
106 #define DECL_mod_1s_2p(name) \
107 DECL_mod_1_1p (name)
108 #define DECL_mod_1s_2p_cps(name) \
109 DECL_mod_1_1p_cps (name)
110 #define DECL_mod_1s_4p(name) \
111 DECL_mod_1_1p (name)
112 #define DECL_mod_1s_4p_cps(name) \
113 DECL_mod_1_1p_cps (name)
114 #define DECL_mod_34lsub1(name) \
115 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t)
116 #define DECL_modexact_1c_odd(name) \
117 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
118 #define DECL_mul_1(name) \
119 DECL_addmul_1 (name)
120 #define DECL_mul_basecase(name) \
121 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)
122 #define DECL_mullo_basecase(name) \
123 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
124 #define DECL_preinv_divrem_1(name) \
125 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int)
126 #define DECL_preinv_mod_1(name) \
127 __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
128 #define DECL_redc_1(name) \
129 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
130 #define DECL_redc_2(name) \
131 __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
132 #define DECL_rshift(name) \
133 DECL_lshift (name)
134 #define DECL_sqr_basecase(name) \
135 __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
136 #define DECL_sub_n(name) \
137 DECL_add_n (name)
138 #define DECL_sublsh1_n(name) \
139 DECL_add_n (name)
140 #define DECL_submul_1(name) \
141 DECL_addmul_1 (name)
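/* For illustration only: each DECL_* macro just expands to a prototype
   matching the corresponding __gmpn_cpuvec slot.  For a hypothetical
   CPU-specific entry point,
     DECL_add_n (mpn_add_n_fictional);
   expands to
     __GMP_DECLSPEC mp_limb_t mpn_add_n_fictional (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
*/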
143 #if ! defined (__GMP_WITHIN_CONFIGURE)
144 #include "config.h"
145 #include "gmp-mparam.h"
146 #include "fib_table.h"
147 #include "fac_table.h"
148 #include "mp_bases.h"
149 #if WANT_FAT_BINARY
150 #include "fat.h"
151 #endif
152 #endif
154 #if HAVE_INTTYPES_H /* for uint_least32_t */
155 # include <inttypes.h>
156 #else
157 # if HAVE_STDINT_H
158 # include <stdint.h>
159 # endif
160 #endif
162 #ifdef __cplusplus
163 #include <cstring> /* for strlen */
164 #include <string> /* for std::string */
165 #endif
168 #ifndef WANT_TMP_DEBUG /* for TMP_ALLOC_LIMBS_2 and others */
169 #define WANT_TMP_DEBUG 0
170 #endif
172 /* The following tries to get a good version of alloca. The tests are
173 adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions.
174 Whether this succeeds is tested by GMP_FUNC_ALLOCA and HAVE_ALLOCA will
175 be setup appropriately.
177 ifndef alloca - a cpp define might already exist.
178 glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca.
179 HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.
181 GCC __builtin_alloca - preferred whenever available.
183 _AIX pragma - IBM compilers need a #pragma in "each module that needs to
184 use alloca". Pragma indented to protect pre-ANSI cpp's. _IBMR2 was
185 used in past versions of GMP, retained still in case it matters.
187 The autoconf manual says this pragma needs to be at the start of a C
188 file, apart from comments and preprocessor directives. Is that true?
189 xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc
190 from gmp.h.
191 */
193 #ifndef alloca
194 # ifdef __GNUC__
195 # define alloca __builtin_alloca
196 # else
197 # ifdef __DECC
198 # define alloca(x) __ALLOCA(x)
199 # else
200 # ifdef _MSC_VER
201 # include <malloc.h>
202 # define alloca _alloca
203 # else
204 # if HAVE_ALLOCA_H
205 # include <alloca.h>
206 # else
207 # if defined (_AIX) || defined (_IBMR2)
208 #pragma alloca
209 # else
210 char *alloca ();
211 # endif
212 # endif
213 # endif
214 # endif
215 # endif
216 #endif
219 /* if not provided by gmp-mparam.h */
220 #ifndef GMP_LIMB_BYTES
221 #define GMP_LIMB_BYTES SIZEOF_MP_LIMB_T
222 #endif
223 #ifndef GMP_LIMB_BITS
224 #define GMP_LIMB_BITS (8 * SIZEOF_MP_LIMB_T)
225 #endif
227 #define BITS_PER_ULONG (8 * SIZEOF_UNSIGNED_LONG)
230 /* gmp_uint_least32_t is an unsigned integer type with at least 32 bits. */
231 #if HAVE_UINT_LEAST32_T
232 typedef uint_least32_t gmp_uint_least32_t;
233 #else
234 #if SIZEOF_UNSIGNED_SHORT >= 4
235 typedef unsigned short gmp_uint_least32_t;
236 #else
237 #if SIZEOF_UNSIGNED >= 4
238 typedef unsigned gmp_uint_least32_t;
239 #else
240 typedef unsigned long gmp_uint_least32_t;
241 #endif
242 #endif
243 #endif
246 /* gmp_intptr_t, for pointer to integer casts */
247 #if HAVE_INTPTR_T
248 typedef intptr_t gmp_intptr_t;
249 #else /* fallback */
250 typedef size_t gmp_intptr_t;
251 #endif
254 /* pre-inverse types for truncating division and modulo */
255 typedef struct {mp_limb_t inv32;} gmp_pi1_t;
256 typedef struct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
259 /* "const" basically means a function does nothing but examine its arguments
260 and give a return value, it doesn't read or write any memory (neither
261 global nor pointed to by arguments), and has no other side-effects. This
262 is more restrictive than "pure". See info node "(gcc)Function
263 Attributes". __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
264 this off when trying to write timing loops. */
265 #if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE)
266 #define ATTRIBUTE_CONST __attribute__ ((const))
267 #else
268 #define ATTRIBUTE_CONST
269 #endif
271 #if HAVE_ATTRIBUTE_NORETURN
272 #define ATTRIBUTE_NORETURN __attribute__ ((noreturn))
273 #else
274 #define ATTRIBUTE_NORETURN
275 #endif
277 /* "malloc" means a function behaves like malloc in that the pointer it
278 returns doesn't alias anything. */
279 #if HAVE_ATTRIBUTE_MALLOC
280 #define ATTRIBUTE_MALLOC __attribute__ ((malloc))
281 #else
282 #define ATTRIBUTE_MALLOC
283 #endif
286 #if ! HAVE_STRCHR
287 #define strchr(s,c) index(s,c)
288 #endif
290 #if ! HAVE_MEMSET
291 #define memset(p, c, n) \
292 do { \
293 ASSERT ((n) >= 0); \
294 char *__memset__p = (p); \
295 int __i; \
296 for (__i = 0; __i < (n); __i++) \
297 __memset__p[__i] = (c); \
298 } while (0)
299 #endif
301 /* va_copy is standard in C99, and gcc provides __va_copy when in strict C89
302 mode. Falling back to a memcpy will give maximum portability, since it
303 works no matter whether va_list is a pointer, struct or array. */
304 #if ! defined (va_copy) && defined (__va_copy)
305 #define va_copy(dst,src) __va_copy(dst,src)
306 #endif
307 #if ! defined (va_copy)
308 #define va_copy(dst,src) \
309 do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
310 #endif
313 /* HAVE_HOST_CPU_alpha_CIX is 1 on an alpha with the CIX instructions
314 (ie. ctlz, ctpop, cttz). */
315 #if HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68 \
316 || HAVE_HOST_CPU_alphaev7
317 #define HAVE_HOST_CPU_alpha_CIX 1
318 #endif
321 #if defined (__cplusplus)
322 extern "C" {
323 #endif
326 /* Usage: TMP_DECL;
327 TMP_MARK;
328 ptr = TMP_ALLOC (bytes);
329 TMP_FREE;
331 Small allocations should use TMP_SALLOC, big allocations should use
332 TMP_BALLOC. Allocations that might be small or big should use TMP_ALLOC.
334 Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
335 TMP_SFREE.
337 TMP_DECL just declares a variable, but might be empty and so must be last
338 in a list of variables. TMP_MARK must be done before any TMP_ALLOC.
339 TMP_ALLOC(0) is not allowed. TMP_FREE doesn't need to be done if a
340 TMP_MARK was made, but then no TMP_ALLOCs. */
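/* A minimal sketch of the pattern (illustrative only, not a real GMP
   function):
     static void
     foo (mp_size_t n)
     {
       mp_ptr scratch;
       TMP_DECL;
       TMP_MARK;
       scratch = TMP_ALLOC_LIMBS (n);
       ... work with {scratch, n} ...
       TMP_FREE;
     }
   TMP_ALLOC_LIMBS is the limb-sized wrapper around TMP_ALLOC defined
   further down. */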
342 /* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
343 __gmp_allocate_func doesn't already determine it. Currently TMP_ALLOC
344 isn't used for "double"s, so that's not in the union. */
345 union tmp_align_t {
346 mp_limb_t l;
347 char *p;
348 };
349 #define __TMP_ALIGN sizeof (union tmp_align_t)
351 /* Return "a" rounded upwards to a multiple of "m", if it isn't already.
352 "a" must be an unsigned type.
353 This is designed for use with a compile-time constant "m".
354 The POW2 case is expected to be usual, and gcc 3.0 and up recognises
355 "(-(8*n))%8" or the like is always zero, which means the rounding up in
356 the WANT_TMP_NOTREENTRANT version of TMP_ALLOC below will be a noop. */
357 #define ROUND_UP_MULTIPLE(a,m) \
358 (POW2_P(m) ? (a) + (-(a))%(m) \
359 : (a)+(m)-1 - (((a)+(m)-1) % (m)))
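/* For example ROUND_UP_MULTIPLE (13, 8) is 16 (power-of-2 branch) and
   ROUND_UP_MULTIPLE (13, 12) is 24 (general branch); a value that is
   already a multiple, such as ROUND_UP_MULTIPLE (16, 8), comes back
   unchanged. */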
361 #if defined (WANT_TMP_ALLOCA) || defined (WANT_TMP_REENTRANT)
362 struct tmp_reentrant_t {
363 struct tmp_reentrant_t *next;
364 size_t size; /* bytes, including header */
365 };
366 __GMP_DECLSPEC void *__gmp_tmp_reentrant_alloc (struct tmp_reentrant_t **, size_t) ATTRIBUTE_MALLOC;
367 __GMP_DECLSPEC void __gmp_tmp_reentrant_free (struct tmp_reentrant_t *);
368 #endif
370 #if WANT_TMP_ALLOCA
371 #define TMP_SDECL
372 #define TMP_DECL struct tmp_reentrant_t *__tmp_marker
373 #define TMP_SMARK
374 #define TMP_MARK __tmp_marker = 0
375 #define TMP_SALLOC(n) alloca(n)
376 #define TMP_BALLOC(n) __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
377 /* The peculiar stack allocation limit here is chosen for efficient asm. */
378 #define TMP_ALLOC(n) \
379 (LIKELY ((n) <= 0x7f00) ? TMP_SALLOC(n) : TMP_BALLOC(n))
380 #define TMP_SFREE
381 #define TMP_FREE \
382 do { \
383 if (UNLIKELY (__tmp_marker != 0)) \
384 __gmp_tmp_reentrant_free (__tmp_marker); \
385 } while (0)
386 #endif
388 #if WANT_TMP_REENTRANT
389 #define TMP_SDECL TMP_DECL
390 #define TMP_DECL struct tmp_reentrant_t *__tmp_marker
391 #define TMP_SMARK TMP_MARK
392 #define TMP_MARK __tmp_marker = 0
393 #define TMP_SALLOC(n) TMP_ALLOC(n)
394 #define TMP_BALLOC(n) TMP_ALLOC(n)
395 #define TMP_ALLOC(n) __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
396 #define TMP_SFREE TMP_FREE
397 #define TMP_FREE __gmp_tmp_reentrant_free (__tmp_marker)
398 #endif
400 #if WANT_TMP_NOTREENTRANT
401 struct tmp_marker
402 {
403 struct tmp_stack *which_chunk;
404 void *alloc_point;
405 };
406 __GMP_DECLSPEC void *__gmp_tmp_alloc (unsigned long) ATTRIBUTE_MALLOC;
407 __GMP_DECLSPEC void __gmp_tmp_mark (struct tmp_marker *);
408 __GMP_DECLSPEC void __gmp_tmp_free (struct tmp_marker *);
409 #define TMP_SDECL TMP_DECL
410 #define TMP_DECL struct tmp_marker __tmp_marker
411 #define TMP_SMARK TMP_MARK
412 #define TMP_MARK __gmp_tmp_mark (&__tmp_marker)
413 #define TMP_SALLOC(n) TMP_ALLOC(n)
414 #define TMP_BALLOC(n) TMP_ALLOC(n)
415 #define TMP_ALLOC(n) \
416 __gmp_tmp_alloc (ROUND_UP_MULTIPLE ((unsigned long) (n), __TMP_ALIGN))
417 #define TMP_SFREE TMP_FREE
418 #define TMP_FREE __gmp_tmp_free (&__tmp_marker)
419 #endif
421 #if WANT_TMP_DEBUG
422 /* See tal-debug.c for some comments. */
423 struct tmp_debug_t {
424 struct tmp_debug_entry_t *list;
425 const char *file;
426 int line;
427 };
428 struct tmp_debug_entry_t {
429 struct tmp_debug_entry_t *next;
430 void *block;
431 size_t size;
432 };
433 __GMP_DECLSPEC void __gmp_tmp_debug_mark (const char *, int, struct tmp_debug_t **,
434 struct tmp_debug_t *,
435 const char *, const char *);
436 __GMP_DECLSPEC void *__gmp_tmp_debug_alloc (const char *, int, int,
437 struct tmp_debug_t **, const char *,
438 size_t) ATTRIBUTE_MALLOC;
439 __GMP_DECLSPEC void __gmp_tmp_debug_free (const char *, int, int,
440 struct tmp_debug_t **,
441 const char *, const char *);
442 #define TMP_SDECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
443 #define TMP_DECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
444 #define TMP_SMARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
445 #define TMP_MARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
446 #define TMP_SFREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
447 #define TMP_FREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
448 /* The marker variable is designed to provoke an uninitialized variable
449 warning from the compiler if TMP_FREE is used without a TMP_MARK.
450 __tmp_marker_inscope does the same for TMP_ALLOC. Runtime tests pick
451 these things up too. */
452 #define TMP_DECL_NAME(marker, marker_name) \
453 int marker; \
454 int __tmp_marker_inscope; \
455 const char *__tmp_marker_name = marker_name; \
456 struct tmp_debug_t __tmp_marker_struct; \
457 /* don't demand NULL, just cast a zero */ \
458 struct tmp_debug_t *__tmp_marker = (struct tmp_debug_t *) 0
459 #define TMP_MARK_NAME(marker, marker_name) \
460 do { \
461 marker = 1; \
462 __tmp_marker_inscope = 1; \
463 __gmp_tmp_debug_mark (ASSERT_FILE, ASSERT_LINE, \
464 &__tmp_marker, &__tmp_marker_struct, \
465 __tmp_marker_name, marker_name); \
466 } while (0)
467 #define TMP_SALLOC(n) TMP_ALLOC(n)
468 #define TMP_BALLOC(n) TMP_ALLOC(n)
469 #define TMP_ALLOC(size) \
470 __gmp_tmp_debug_alloc (ASSERT_FILE, ASSERT_LINE, \
471 __tmp_marker_inscope, \
472 &__tmp_marker, __tmp_marker_name, size)
473 #define TMP_FREE_NAME(marker, marker_name) \
474 do { \
475 __gmp_tmp_debug_free (ASSERT_FILE, ASSERT_LINE, \
476 marker, &__tmp_marker, \
477 __tmp_marker_name, marker_name); \
478 } while (0)
479 #endif /* WANT_TMP_DEBUG */
482 /* Allocating various types. */
483 #define TMP_ALLOC_TYPE(n,type) ((type *) TMP_ALLOC ((n) * sizeof (type)))
484 #define TMP_SALLOC_TYPE(n,type) ((type *) TMP_SALLOC ((n) * sizeof (type)))
485 #define TMP_BALLOC_TYPE(n,type) ((type *) TMP_BALLOC ((n) * sizeof (type)))
486 #define TMP_ALLOC_LIMBS(n) TMP_ALLOC_TYPE(n,mp_limb_t)
487 #define TMP_SALLOC_LIMBS(n) TMP_SALLOC_TYPE(n,mp_limb_t)
488 #define TMP_BALLOC_LIMBS(n) TMP_BALLOC_TYPE(n,mp_limb_t)
489 #define TMP_ALLOC_MP_PTRS(n) TMP_ALLOC_TYPE(n,mp_ptr)
490 #define TMP_SALLOC_MP_PTRS(n) TMP_SALLOC_TYPE(n,mp_ptr)
491 #define TMP_BALLOC_MP_PTRS(n) TMP_BALLOC_TYPE(n,mp_ptr)
493 /* It's more efficient to allocate one block than many. This is certainly
494 true of the malloc methods, but it can even be true of alloca if that
495 involves copying a chunk of stack (various RISCs), or a call to a stack
496 bounds check (mingw). In any case, when debugging keep separate blocks
497 so a redzoning malloc debugger can protect each individually. */
498 #define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize) \
499 do { \
500 if (WANT_TMP_DEBUG) \
501 { \
502 (xp) = TMP_ALLOC_LIMBS (xsize); \
503 (yp) = TMP_ALLOC_LIMBS (ysize); \
504 } \
505 else \
506 { \
507 (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize)); \
508 (yp) = (xp) + (xsize); \
509 } \
510 } while (0)
511 #define TMP_ALLOC_LIMBS_3(xp,xsize, yp,ysize, zp,zsize) \
512 do { \
513 if (WANT_TMP_DEBUG) \
514 { \
515 (xp) = TMP_ALLOC_LIMBS (xsize); \
516 (yp) = TMP_ALLOC_LIMBS (ysize); \
517 (zp) = TMP_ALLOC_LIMBS (zsize); \
518 } \
519 else \
520 { \
521 (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize) + (zsize)); \
522 (yp) = (xp) + (xsize); \
523 (zp) = (yp) + (ysize); \
524 } \
525 } while (0)
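/* So, for instance, TMP_ALLOC_LIMBS_2 (tp, n, up, 2*n) normally makes one
   allocation of 3*n limbs and points tp and up into it, but keeps two
   separate blocks under WANT_TMP_DEBUG. */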
527 /* From gmp.h, nicer names for internal use. */
528 #define CRAY_Pragma(str) __GMP_CRAY_Pragma(str)
529 #define MPN_CMP(result, xp, yp, size) __GMPN_CMP(result, xp, yp, size)
530 #define LIKELY(cond) __GMP_LIKELY(cond)
531 #define UNLIKELY(cond) __GMP_UNLIKELY(cond)
533 #define ABS(x) ((x) >= 0 ? (x) : -(x))
534 #define NEG_CAST(T,x) (- (__GMP_CAST (T, (x) + 1) - 1))
535 #define ABS_CAST(T,x) ((x) >= 0 ? __GMP_CAST (T, x) : NEG_CAST (T,x))
536 #undef MIN
537 #define MIN(l,o) ((l) < (o) ? (l) : (o))
538 #undef MAX
539 #define MAX(h,i) ((h) > (i) ? (h) : (i))
540 #define numberof(x) (sizeof (x) / sizeof ((x)[0]))
542 /* Field access macros. */
543 #define SIZ(x) ((x)->_mp_size)
544 #define ABSIZ(x) ABS (SIZ (x))
545 #define PTR(x) ((x)->_mp_d)
546 #define EXP(x) ((x)->_mp_exp)
547 #define PREC(x) ((x)->_mp_prec)
548 #define ALLOC(x) ((x)->_mp_alloc)
549 #define NUM(x) mpq_numref(x)
550 #define DEN(x) mpq_denref(x)
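/* For example (sketch): to inspect an mpz_t u one can write
     mp_srcptr up = PTR (u);
     mp_size_t usize = SIZ (u);
     mp_size_t un = ABSIZ (u);
   where usize is negative when u is negative and {up, un} are the limbs of
   its absolute value. */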
552 /* n-1 inverts any low zeros and the lowest one bit. If n&(n-1) leaves zero
553 then that lowest one bit must have been the only bit set. n==0 will
554 return true though, so avoid that. */
555 #define POW2_P(n) (((n) & ((n) - 1)) == 0)
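/* For instance POW2_P(16) is 1 since 16 & 15 == 0, whereas POW2_P(24) is 0
   since 24 & 23 == 16. */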
557 /* This is intended for constant THRESHOLDs only, where the compiler
558 can completely fold the result. */
559 #define LOG2C(n) \
560 (((n) >= 0x1) + ((n) >= 0x2) + ((n) >= 0x4) + ((n) >= 0x8) + \
561 ((n) >= 0x10) + ((n) >= 0x20) + ((n) >= 0x40) + ((n) >= 0x80) + \
562 ((n) >= 0x100) + ((n) >= 0x200) + ((n) >= 0x400) + ((n) >= 0x800) + \
563 ((n) >= 0x1000) + ((n) >= 0x2000) + ((n) >= 0x4000) + ((n) >= 0x8000))
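/* For constant n in 1..0xffff this evaluates to 1 + floor(log2(n)), for
   example LOG2C(1) == 1, LOG2C(4) == 3, LOG2C(1000) == 10; LOG2C(0) is 0. */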
565 #define MP_LIMB_T_MAX (~ (mp_limb_t) 0)
567 /* Must cast ULONG_MAX etc to unsigned long etc, since they might not be
568 unsigned on a K&R compiler. In particular the HP-UX 10 bundled K&R cc
569 treats the plain decimal values in <limits.h> as signed. */
570 #define ULONG_HIGHBIT (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
571 #define UINT_HIGHBIT (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
572 #define USHRT_HIGHBIT (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1))
573 #define GMP_LIMB_HIGHBIT (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
575 #if __GMP_MP_SIZE_T_INT
576 #define MP_SIZE_T_MAX INT_MAX
577 #define MP_SIZE_T_MIN INT_MIN
578 #else
579 #define MP_SIZE_T_MAX LONG_MAX
580 #define MP_SIZE_T_MIN LONG_MIN
581 #endif
583 /* mp_exp_t is the same as mp_size_t */
584 #define MP_EXP_T_MAX MP_SIZE_T_MAX
585 #define MP_EXP_T_MIN MP_SIZE_T_MIN
587 #define LONG_HIGHBIT LONG_MIN
588 #define INT_HIGHBIT INT_MIN
589 #define SHRT_HIGHBIT SHRT_MIN
592 #define GMP_NUMB_HIGHBIT (CNST_LIMB(1) << (GMP_NUMB_BITS-1))
594 #if GMP_NAIL_BITS == 0
595 #define GMP_NAIL_LOWBIT CNST_LIMB(0)
596 #else
597 #define GMP_NAIL_LOWBIT (CNST_LIMB(1) << GMP_NUMB_BITS)
598 #endif
600 #if GMP_NAIL_BITS != 0
601 /* Set various *_THRESHOLD values to be used for nails. Thus we avoid using
602 code that has not yet been qualified. */
604 #undef DC_DIV_QR_THRESHOLD
605 #define DC_DIV_QR_THRESHOLD 50
607 #undef DIVREM_1_NORM_THRESHOLD
608 #undef DIVREM_1_UNNORM_THRESHOLD
609 #undef MOD_1_NORM_THRESHOLD
610 #undef MOD_1_UNNORM_THRESHOLD
611 #undef USE_PREINV_DIVREM_1
612 #undef DIVREM_2_THRESHOLD
613 #undef DIVEXACT_1_THRESHOLD
614 #define DIVREM_1_NORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
615 #define DIVREM_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
616 #define MOD_1_NORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
617 #define MOD_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* no preinv */
618 #define USE_PREINV_DIVREM_1 0 /* no preinv */
619 #define DIVREM_2_THRESHOLD MP_SIZE_T_MAX /* no preinv */
621 /* mpn/generic/mul_fft.c is not nails-capable. */
622 #undef MUL_FFT_THRESHOLD
623 #undef SQR_FFT_THRESHOLD
624 #define MUL_FFT_THRESHOLD MP_SIZE_T_MAX
625 #define SQR_FFT_THRESHOLD MP_SIZE_T_MAX
626 #endif
628 /* Swap macros. */
630 #define MP_LIMB_T_SWAP(x, y) \
631 do { \
632 mp_limb_t __mp_limb_t_swap__tmp = (x); \
633 (x) = (y); \
634 (y) = __mp_limb_t_swap__tmp; \
635 } while (0)
636 #define MP_SIZE_T_SWAP(x, y) \
637 do { \
638 mp_size_t __mp_size_t_swap__tmp = (x); \
639 (x) = (y); \
640 (y) = __mp_size_t_swap__tmp; \
641 } while (0)
643 #define MP_PTR_SWAP(x, y) \
644 do { \
645 mp_ptr __mp_ptr_swap__tmp = (x); \
646 (x) = (y); \
647 (y) = __mp_ptr_swap__tmp; \
648 } while (0)
649 #define MP_SRCPTR_SWAP(x, y) \
650 do { \
651 mp_srcptr __mp_srcptr_swap__tmp = (x); \
652 (x) = (y); \
653 (y) = __mp_srcptr_swap__tmp; \
654 } while (0)
656 #define MPN_PTR_SWAP(xp,xs, yp,ys) \
657 do { \
658 MP_PTR_SWAP (xp, yp); \
659 MP_SIZE_T_SWAP (xs, ys); \
660 } while(0)
661 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
662 do { \
663 MP_SRCPTR_SWAP (xp, yp); \
664 MP_SIZE_T_SWAP (xs, ys); \
665 } while(0)
667 #define MPZ_PTR_SWAP(x, y) \
668 do { \
669 mpz_ptr __mpz_ptr_swap__tmp = (x); \
670 (x) = (y); \
671 (y) = __mpz_ptr_swap__tmp; \
672 } while (0)
673 #define MPZ_SRCPTR_SWAP(x, y) \
674 do { \
675 mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
676 (x) = (y); \
677 (y) = __mpz_srcptr_swap__tmp; \
678 } while (0)
680 #define MPQ_PTR_SWAP(x, y) \
681 do { \
682 mpq_ptr __mpq_ptr_swap__tmp = (x); \
683 (x) = (y); \
684 (y) = __mpq_ptr_swap__tmp; \
685 } while (0)
686 #define MPQ_SRCPTR_SWAP(x, y) \
687 do { \
688 mpq_srcptr __mpq_srcptr_swap__tmp = (x); \
689 (x) = (y); \
690 (y) = __mpq_srcptr_swap__tmp; \
691 } while (0)
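/* Typical use (sketch): put the longer of two source operands first before
   calling a routine that requires an >= bn,
     if (an < bn)
       MPN_SRCPTR_SWAP (ap,an, bp,bn);
*/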
694 /* Enhancement: __gmp_allocate_func could have "__attribute__ ((malloc))",
695 but current gcc (3.0) doesn't seem to support that. */
696 __GMP_DECLSPEC extern void * (*__gmp_allocate_func) (size_t);
697 __GMP_DECLSPEC extern void * (*__gmp_reallocate_func) (void *, size_t, size_t);
698 __GMP_DECLSPEC extern void (*__gmp_free_func) (void *, size_t);
700 __GMP_DECLSPEC void *__gmp_default_allocate (size_t);
701 __GMP_DECLSPEC void *__gmp_default_reallocate (void *, size_t, size_t);
702 __GMP_DECLSPEC void __gmp_default_free (void *, size_t);
704 #define __GMP_ALLOCATE_FUNC_TYPE(n,type) \
705 ((type *) (*__gmp_allocate_func) ((n) * sizeof (type)))
706 #define __GMP_ALLOCATE_FUNC_LIMBS(n) __GMP_ALLOCATE_FUNC_TYPE (n, mp_limb_t)
708 #define __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, type) \
709 ((type *) (*__gmp_reallocate_func) \
710 (p, (old_size) * sizeof (type), (new_size) * sizeof (type)))
711 #define __GMP_REALLOCATE_FUNC_LIMBS(p, old_size, new_size) \
712 __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, mp_limb_t)
714 #define __GMP_FREE_FUNC_TYPE(p,n,type) (*__gmp_free_func) (p, (n) * sizeof (type))
715 #define __GMP_FREE_FUNC_LIMBS(p,n) __GMP_FREE_FUNC_TYPE (p, n, mp_limb_t)
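/* For example (sketch), a temporary block of n limbs obtained through the
   user-installable memory functions:
     mp_ptr tp = __GMP_ALLOCATE_FUNC_LIMBS (n);
     ... use {tp, n} ...
     __GMP_FREE_FUNC_LIMBS (tp, n);
*/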
717 #define __GMP_REALLOCATE_FUNC_MAYBE(ptr, oldsize, newsize) \
718 do { \
719 if ((oldsize) != (newsize)) \
720 (ptr) = (*__gmp_reallocate_func) (ptr, oldsize, newsize); \
721 } while (0)
723 #define __GMP_REALLOCATE_FUNC_MAYBE_TYPE(ptr, oldsize, newsize, type) \
724 do { \
725 if ((oldsize) != (newsize)) \
726 (ptr) = (type *) (*__gmp_reallocate_func) \
727 (ptr, (oldsize) * sizeof (type), (newsize) * sizeof (type)); \
728 } while (0)
731 /* Dummy for non-gcc, code involving it will go dead. */
732 #if ! defined (__GNUC__) || __GNUC__ < 2
733 #define __builtin_constant_p(x) 0
734 #endif
737 /* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the
738 stack usage is compatible. __attribute__ ((regparm (N))) helps by
739 putting leading parameters in registers, avoiding extra stack.
741 regparm cannot be used with calls going through the PLT, because the
742 binding code there may clobber the registers (%eax, %edx, %ecx) used for
743 the regparm parameters. Calls to local (ie. static) functions could
744 still use this, if we cared to differentiate locals and globals.
746 On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with
747 -p or -pg profiling, since that version of gcc doesn't realize the
748 .mcount calls will clobber the parameter registers. Other systems are
749 ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't
750 bother to try to detect this. regparm is only an optimization so we just
751 disable it when profiling (profiling being a slowdown anyway). */
753 #if HAVE_HOST_CPU_FAMILY_x86 && __GMP_GNUC_PREREQ (2,96) && ! defined (PIC) \
754 && ! WANT_PROFILING_PROF && ! WANT_PROFILING_GPROF
755 #define USE_LEADING_REGPARM 1
756 #else
757 #define USE_LEADING_REGPARM 0
758 #endif
760 /* Macros for altering parameter order according to regparm usage. */
761 #if USE_LEADING_REGPARM
762 #define REGPARM_2_1(a,b,x) x,a,b
763 #define REGPARM_3_1(a,b,c,x) x,a,b,c
764 #define REGPARM_ATTR(n) __attribute__ ((regparm (n)))
765 #else
766 #define REGPARM_2_1(a,b,x) a,b,x
767 #define REGPARM_3_1(a,b,c,x) a,b,c,x
768 #define REGPARM_ATTR(n)
769 #endif
772 /* ASM_L gives a local label for a gcc asm block, for use when temporary
773 local labels like "1:" might not be available, which is the case for
774 instance on the x86s (the SCO assembler doesn't support them).
776 The label generated is made unique by including "%=" which is a unique
777 number for each insn. This ensures the same name can be used in multiple
778 asm blocks, perhaps via a macro. Since jumps between asm blocks are not
779 allowed there's no need for a label to be usable outside a single
780 block. */
782 #define ASM_L(name) LSYM_PREFIX "asm_%=_" #name
785 #if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86
786 #if 0
787 /* FIXME: Check that these actually improve things.
788 FIXME: Need a cld after each std.
789 FIXME: Can't have inputs in clobbered registers, must describe them as
790 dummy outputs, and add volatile. */
791 #define MPN_COPY_INCR(DST, SRC, N) \
792 __asm__ ("cld\n\trep\n\tmovsl" : : \
793 "D" (DST), "S" (SRC), "c" (N) : \
794 "cx", "di", "si", "memory")
795 #define MPN_COPY_DECR(DST, SRC, N) \
796 __asm__ ("std\n\trep\n\tmovsl" : : \
797 "D" ((DST) + (N) - 1), "S" ((SRC) + (N) - 1), "c" (N) : \
798 "cx", "di", "si", "memory")
799 #endif
800 #endif
803 __GMP_DECLSPEC void __gmpz_aorsmul_1 (REGPARM_3_1 (mpz_ptr, mpz_srcptr, mp_limb_t, mp_size_t)) REGPARM_ATTR(1);
804 #define mpz_aorsmul_1(w,u,v,sub) __gmpz_aorsmul_1 (REGPARM_3_1 (w, u, v, sub))
806 #define mpz_n_pow_ui __gmpz_n_pow_ui
807 __GMP_DECLSPEC void mpz_n_pow_ui (mpz_ptr, mp_srcptr, mp_size_t, unsigned long);
810 #define mpn_addmul_1c __MPN(addmul_1c)
811 __GMP_DECLSPEC mp_limb_t mpn_addmul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
813 #ifndef mpn_addmul_2 /* if not done with cpuvec in a fat binary */
814 #define mpn_addmul_2 __MPN(addmul_2)
815 __GMP_DECLSPEC mp_limb_t mpn_addmul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
816 #endif
818 #define mpn_addmul_3 __MPN(addmul_3)
819 __GMP_DECLSPEC mp_limb_t mpn_addmul_3 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
821 #define mpn_addmul_4 __MPN(addmul_4)
822 __GMP_DECLSPEC mp_limb_t mpn_addmul_4 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
824 #define mpn_addmul_5 __MPN(addmul_5)
825 __GMP_DECLSPEC mp_limb_t mpn_addmul_5 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
827 #define mpn_addmul_6 __MPN(addmul_6)
828 __GMP_DECLSPEC mp_limb_t mpn_addmul_6 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
830 #define mpn_addmul_7 __MPN(addmul_7)
831 __GMP_DECLSPEC mp_limb_t mpn_addmul_7 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
833 #define mpn_addmul_8 __MPN(addmul_8)
834 __GMP_DECLSPEC mp_limb_t mpn_addmul_8 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
836 /* Alternative entry point in mpn_addmul_2 for the benefit of mpn_sqr_basecase. */
837 #define mpn_addmul_2s __MPN(addmul_2s)
838 __GMP_DECLSPEC mp_limb_t mpn_addmul_2s (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
840 /* mpn_addlsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+2*{b,n}, and
841 returns the carry out (0, 1 or 2). Use _ip1 when a=c. */
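/* Ignoring the single-pass implementation, mpn_addlsh1_n (cp, ap, bp, n)
   gives the same result and carry as the sketch
     cy  = mpn_lshift (tp, bp, n, 1);
     cy += mpn_add_n (cp, ap, tp, n);
   with {tp,n} a temporary. */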
842 #ifndef mpn_addlsh1_n /* if not done with cpuvec in a fat binary */
843 #define mpn_addlsh1_n __MPN(addlsh1_n)
844 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
845 #endif
846 #define mpn_addlsh1_nc __MPN(addlsh1_nc)
847 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
848 #if HAVE_NATIVE_mpn_addlsh1_n && ! HAVE_NATIVE_mpn_addlsh1_n_ip1
849 #define mpn_addlsh1_n_ip1(dst,src,n) mpn_addlsh1_n(dst,dst,src,n)
850 #define HAVE_NATIVE_mpn_addlsh1_n_ip1 1
851 #else
852 #define mpn_addlsh1_n_ip1 __MPN(addlsh1_n_ip1)
853 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
854 #endif
855 #if HAVE_NATIVE_mpn_addlsh1_nc && ! HAVE_NATIVE_mpn_addlsh1_nc_ip1
856 #define mpn_addlsh1_nc_ip1(dst,src,n,c) mpn_addlsh1_nc(dst,dst,src,n,c)
857 #define HAVE_NATIVE_mpn_addlsh1_nc_ip1 1
858 #else
859 #define mpn_addlsh1_nc_ip1 __MPN(addlsh1_nc_ip1)
860 __GMP_DECLSPEC mp_limb_t mpn_addlsh1_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
861 #endif
863 #ifndef mpn_addlsh2_n /* if not done with cpuvec in a fat binary */
864 /* mpn_addlsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+4*{b,n}, and
865 returns the carry out (0, ..., 4). Use _ip1 when a=c. */
866 #define mpn_addlsh2_n __MPN(addlsh2_n)
867 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
868 #endif
869 #define mpn_addlsh2_nc __MPN(addlsh2_nc)
870 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
871 #if HAVE_NATIVE_mpn_addlsh2_n && ! HAVE_NATIVE_mpn_addlsh2_n_ip1
872 #define mpn_addlsh2_n_ip1(dst,src,n) mpn_addlsh2_n(dst,dst,src,n)
873 #define HAVE_NATIVE_mpn_addlsh2_n_ip1 1
874 #else
875 #define mpn_addlsh2_n_ip1 __MPN(addlsh2_n_ip1)
876 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
877 #endif
878 #if HAVE_NATIVE_mpn_addlsh2_nc && ! HAVE_NATIVE_mpn_addlsh2_nc_ip1
879 #define mpn_addlsh2_nc_ip1(dst,src,n,c) mpn_addlsh2_nc(dst,dst,src,n,c)
880 #define HAVE_NATIVE_mpn_addlsh2_nc_ip1 1
881 #else
882 #define mpn_addlsh2_nc_ip1 __MPN(addlsh2_nc_ip1)
883 __GMP_DECLSPEC mp_limb_t mpn_addlsh2_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
884 #endif
886 /* mpn_addlsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}+2^k*{b,n}, and
887 returns the carry out (0, ..., 2^k). Use _ip1 when a=c. */
888 #define mpn_addlsh_n __MPN(addlsh_n)
889 __GMP_DECLSPEC mp_limb_t mpn_addlsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
890 #define mpn_addlsh_nc __MPN(addlsh_nc)
891 __GMP_DECLSPEC mp_limb_t mpn_addlsh_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
892 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh_n_ip1
893 #define mpn_addlsh_n_ip1(dst,src,n,s) mpn_addlsh_n(dst,dst,src,n,s)
894 #define HAVE_NATIVE_mpn_addlsh_n_ip1 1
895 #else
896 #define mpn_addlsh_n_ip1 __MPN(addlsh_n_ip1)
897 __GMP_DECLSPEC mp_limb_t mpn_addlsh_n_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
898 #endif
899 #if HAVE_NATIVE_mpn_addlsh_nc && ! HAVE_NATIVE_mpn_addlsh_nc_ip1
900 #define mpn_addlsh_nc_ip1(dst,src,n,s,c) mpn_addlsh_nc(dst,dst,src,n,s,c)
901 #define HAVE_NATIVE_mpn_addlsh_nc_ip1 1
902 #else
903 #define mpn_addlsh_nc_ip1 __MPN(addlsh_nc_ip1)
904 __GMP_DECLSPEC mp_limb_t mpn_addlsh_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
905 #endif
907 #ifndef mpn_sublsh1_n /* if not done with cpuvec in a fat binary */
908 /* mpn_sublsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-2*{b,n}, and
909 returns the borrow out (0, 1 or 2). Use _ip1 when a=c. */
910 #define mpn_sublsh1_n __MPN(sublsh1_n)
911 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
912 #endif
913 #define mpn_sublsh1_nc __MPN(sublsh1_nc)
914 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
915 #if HAVE_NATIVE_mpn_sublsh1_n && ! HAVE_NATIVE_mpn_sublsh1_n_ip1
916 #define mpn_sublsh1_n_ip1(dst,src,n) mpn_sublsh1_n(dst,dst,src,n)
917 #define HAVE_NATIVE_mpn_sublsh1_n_ip1 1
918 #else
919 #define mpn_sublsh1_n_ip1 __MPN(sublsh1_n_ip1)
920 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
921 #endif
922 #if HAVE_NATIVE_mpn_sublsh1_nc && ! HAVE_NATIVE_mpn_sublsh1_nc_ip1
923 #define mpn_sublsh1_nc_ip1(dst,src,n,c) mpn_sublsh1_nc(dst,dst,src,n,c)
924 #define HAVE_NATIVE_mpn_sublsh1_nc_ip1 1
925 #else
926 #define mpn_sublsh1_nc_ip1 __MPN(sublsh1_nc_ip1)
927 __GMP_DECLSPEC mp_limb_t mpn_sublsh1_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
928 #endif
930 /* mpn_rsblsh1_n(c,a,b,n), when it exists, sets {c,n} to 2*{b,n}-{a,n}, and
931 returns the carry out (-1, 0, 1). */
932 #define mpn_rsblsh1_n __MPN(rsblsh1_n)
933 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
934 #define mpn_rsblsh1_nc __MPN(rsblsh1_nc)
935 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
937 /* mpn_sublsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-4*{b,n}, and
938 returns the borrow out (0, ..., 4). Use _ip1 when a=c. */
939 #define mpn_sublsh2_n __MPN(sublsh2_n)
940 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
941 #define mpn_sublsh2_nc __MPN(sublsh2_nc)
942 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
943 #if HAVE_NATIVE_mpn_sublsh2_n && ! HAVE_NATIVE_mpn_sublsh2_n_ip1
944 #define mpn_sublsh2_n_ip1(dst,src,n) mpn_sublsh2_n(dst,dst,src,n)
945 #define HAVE_NATIVE_mpn_sublsh2_n_ip1 1
946 #else
947 #define mpn_sublsh2_n_ip1 __MPN(sublsh2_n_ip1)
948 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_n_ip1 (mp_ptr, mp_srcptr, mp_size_t);
949 #endif
950 #if HAVE_NATIVE_mpn_sublsh2_nc && ! HAVE_NATIVE_mpn_sublsh2_nc_ip1
951 #define mpn_sublsh2_nc_ip1(dst,src,n,c) mpn_sublsh2_nc(dst,dst,src,n,c)
952 #define HAVE_NATIVE_mpn_sublsh2_nc_ip1 1
953 #else
954 #define mpn_sublsh2_nc_ip1 __MPN(sublsh2_nc_ip1)
955 __GMP_DECLSPEC mp_limb_t mpn_sublsh2_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
956 #endif
958 /* mpn_sublsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}-2^k*{b,n}, and
959 returns the carry out (0, ..., 2^k). Use _ip1 when a=c. */
960 #define mpn_sublsh_n __MPN(sublsh_n)
961 __GMP_DECLSPEC mp_limb_t mpn_sublsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
962 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh_n_ip1
963 #define mpn_sublsh_n_ip1(dst,src,n,s) mpn_sublsh_n(dst,dst,src,n,s)
964 #define HAVE_NATIVE_mpn_sublsh_n_ip1 1
965 #else
966 #define mpn_sublsh_n_ip1 __MPN(sublsh_n_ip1)
967 __GMP_DECLSPEC mp_limb_t mpn_sublsh_n_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
968 #endif
969 #if HAVE_NATIVE_mpn_sublsh_nc && ! HAVE_NATIVE_mpn_sublsh_nc_ip1
970 #define mpn_sublsh_nc_ip1(dst,src,n,s,c) mpn_sublsh_nc(dst,dst,src,n,s,c)
971 #define HAVE_NATIVE_mpn_sublsh_nc_ip1 1
972 #else
973 #define mpn_sublsh_nc_ip1 __MPN(sublsh_nc_ip1)
974 __GMP_DECLSPEC mp_limb_t mpn_sublsh_nc_ip1 (mp_ptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
975 #endif
977 /* mpn_rsblsh2_n(c,a,b,n), when it exists, sets {c,n} to 4*{b,n}-{a,n}, and
978 returns the carry out (-1, ..., 3). */
979 #define mpn_rsblsh2_n __MPN(rsblsh2_n)
980 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
981 #define mpn_rsblsh2_nc __MPN(rsblsh2_nc)
982 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
984 /* mpn_rsblsh_n(c,a,b,n,k), when it exists, sets {c,n} to 2^k*{b,n}-{a,n}, and
985 returns the carry out (-1, 0, ..., 2^k-1). */
986 #define mpn_rsblsh_n __MPN(rsblsh_n)
987 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int);
988 #define mpn_rsblsh_nc __MPN(rsblsh_nc)
989 __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int, mp_limb_t);
991 /* mpn_rsh1add_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} + {b,n}) >> 1,
992 and returns the bit rshifted out (0 or 1). */
993 #define mpn_rsh1add_n __MPN(rsh1add_n)
994 __GMP_DECLSPEC mp_limb_t mpn_rsh1add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
995 #define mpn_rsh1add_nc __MPN(rsh1add_nc)
996 __GMP_DECLSPEC mp_limb_t mpn_rsh1add_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
998 /* mpn_rsh1sub_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} - {b,n}) >> 1,
999 and returns the bit rshifted out (0 or 1). If there's a borrow from the
1000 subtract, it's stored as a 1 in the high bit of c[n-1], like a twos
1001 complement negative. */
1002 #define mpn_rsh1sub_n __MPN(rsh1sub_n)
1003 __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1004 #define mpn_rsh1sub_nc __MPN(rsh1sub_nc)
1005 __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1007 #ifndef mpn_lshiftc /* if not done with cpuvec in a fat binary */
1008 #define mpn_lshiftc __MPN(lshiftc)
1009 __GMP_DECLSPEC mp_limb_t mpn_lshiftc (mp_ptr, mp_srcptr, mp_size_t, unsigned int);
1010 #endif
1012 #define mpn_add_err1_n __MPN(add_err1_n)
1013 __GMP_DECLSPEC mp_limb_t mpn_add_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1015 #define mpn_add_err2_n __MPN(add_err2_n)
1016 __GMP_DECLSPEC mp_limb_t mpn_add_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1018 #define mpn_add_err3_n __MPN(add_err3_n)
1019 __GMP_DECLSPEC mp_limb_t mpn_add_err3_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1021 #define mpn_sub_err1_n __MPN(sub_err1_n)
1022 __GMP_DECLSPEC mp_limb_t mpn_sub_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1024 #define mpn_sub_err2_n __MPN(sub_err2_n)
1025 __GMP_DECLSPEC mp_limb_t mpn_sub_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1027 #define mpn_sub_err3_n __MPN(sub_err3_n)
1028 __GMP_DECLSPEC mp_limb_t mpn_sub_err3_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1030 #define mpn_add_n_sub_n __MPN(add_n_sub_n)
1031 __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_n (mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1033 #define mpn_add_n_sub_nc __MPN(add_n_sub_nc)
1034 __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_nc (mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
1036 #define mpn_addaddmul_1msb0 __MPN(addaddmul_1msb0)
1037 __GMP_DECLSPEC mp_limb_t mpn_addaddmul_1msb0 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1039 #define mpn_divrem_1c __MPN(divrem_1c)
1040 __GMP_DECLSPEC mp_limb_t mpn_divrem_1c (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1042 #define mpn_dump __MPN(dump)
1043 __GMP_DECLSPEC void mpn_dump (mp_srcptr, mp_size_t);
1045 #define mpn_fib2_ui __MPN(fib2_ui)
1046 __GMP_DECLSPEC mp_size_t mpn_fib2_ui (mp_ptr, mp_ptr, unsigned long);
1048 /* Remap names of internal mpn functions. */
1049 #define __clz_tab __MPN(clz_tab)
1050 #define mpn_udiv_w_sdiv __MPN(udiv_w_sdiv)
1052 #define mpn_jacobi_base __MPN(jacobi_base)
1053 __GMP_DECLSPEC int mpn_jacobi_base (mp_limb_t, mp_limb_t, int) ATTRIBUTE_CONST;
1055 #define mpn_jacobi_2 __MPN(jacobi_2)
1056 __GMP_DECLSPEC int mpn_jacobi_2 (mp_srcptr, mp_srcptr, unsigned);
1058 #define mpn_jacobi_n __MPN(jacobi_n)
1059 __GMP_DECLSPEC int mpn_jacobi_n (mp_ptr, mp_ptr, mp_size_t, unsigned);
1061 #define mpn_mod_1c __MPN(mod_1c)
1062 __GMP_DECLSPEC mp_limb_t mpn_mod_1c (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
1064 #define mpn_mul_1c __MPN(mul_1c)
1065 __GMP_DECLSPEC mp_limb_t mpn_mul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1067 #define mpn_mul_2 __MPN(mul_2)
1068 __GMP_DECLSPEC mp_limb_t mpn_mul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1070 #define mpn_mul_3 __MPN(mul_3)
1071 __GMP_DECLSPEC mp_limb_t mpn_mul_3 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1073 #define mpn_mul_4 __MPN(mul_4)
1074 __GMP_DECLSPEC mp_limb_t mpn_mul_4 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1076 #define mpn_mul_5 __MPN(mul_5)
1077 __GMP_DECLSPEC mp_limb_t mpn_mul_5 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1079 #define mpn_mul_6 __MPN(mul_6)
1080 __GMP_DECLSPEC mp_limb_t mpn_mul_6 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1082 #ifndef mpn_mul_basecase /* if not done with cpuvec in a fat binary */
1083 #define mpn_mul_basecase __MPN(mul_basecase)
1084 __GMP_DECLSPEC void mpn_mul_basecase (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1085 #endif
1087 #define mpn_mullo_n __MPN(mullo_n)
1088 __GMP_DECLSPEC void mpn_mullo_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1090 #ifndef mpn_mullo_basecase /* if not done with cpuvec in a fat binary */
1091 #define mpn_mullo_basecase __MPN(mullo_basecase)
1092 __GMP_DECLSPEC void mpn_mullo_basecase (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1093 #endif
1095 #ifndef mpn_sqr_basecase /* if not done with cpuvec in a fat binary */
1096 #define mpn_sqr_basecase __MPN(sqr_basecase)
1097 __GMP_DECLSPEC void mpn_sqr_basecase (mp_ptr, mp_srcptr, mp_size_t);
1098 #endif
1100 #define mpn_sqrlo __MPN(sqrlo)
1101 __GMP_DECLSPEC void mpn_sqrlo (mp_ptr, mp_srcptr, mp_size_t);
1103 #define mpn_sqrlo_basecase __MPN(sqrlo_basecase)
1104 __GMP_DECLSPEC void mpn_sqrlo_basecase (mp_ptr, mp_srcptr, mp_size_t);
1106 #define mpn_mulmid_basecase __MPN(mulmid_basecase)
1107 __GMP_DECLSPEC void mpn_mulmid_basecase (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1109 #define mpn_mulmid_n __MPN(mulmid_n)
1110 __GMP_DECLSPEC void mpn_mulmid_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1112 #define mpn_mulmid __MPN(mulmid)
1113 __GMP_DECLSPEC void mpn_mulmid (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1115 #define mpn_submul_1c __MPN(submul_1c)
1116 __GMP_DECLSPEC mp_limb_t mpn_submul_1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1118 #ifndef mpn_redc_1 /* if not done with cpuvec in a fat binary */
1119 #define mpn_redc_1 __MPN(redc_1)
1120 __GMP_DECLSPEC mp_limb_t mpn_redc_1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1121 #endif
1123 #ifndef mpn_redc_2 /* if not done with cpuvec in a fat binary */
1124 #define mpn_redc_2 __MPN(redc_2)
1125 __GMP_DECLSPEC mp_limb_t mpn_redc_2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1126 #endif
1128 #define mpn_redc_n __MPN(redc_n)
1129 __GMP_DECLSPEC void mpn_redc_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
1132 #ifndef mpn_mod_1_1p_cps /* if not done with cpuvec in a fat binary */
1133 #define mpn_mod_1_1p_cps __MPN(mod_1_1p_cps)
1134 __GMP_DECLSPEC void mpn_mod_1_1p_cps (mp_limb_t [4], mp_limb_t);
1135 #endif
1136 #ifndef mpn_mod_1_1p /* if not done with cpuvec in a fat binary */
1137 #define mpn_mod_1_1p __MPN(mod_1_1p)
1138 __GMP_DECLSPEC mp_limb_t mpn_mod_1_1p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [4]) __GMP_ATTRIBUTE_PURE;
1139 #endif
1141 #ifndef mpn_mod_1s_2p_cps /* if not done with cpuvec in a fat binary */
1142 #define mpn_mod_1s_2p_cps __MPN(mod_1s_2p_cps)
1143 __GMP_DECLSPEC void mpn_mod_1s_2p_cps (mp_limb_t [5], mp_limb_t);
1144 #endif
1145 #ifndef mpn_mod_1s_2p /* if not done with cpuvec in a fat binary */
1146 #define mpn_mod_1s_2p __MPN(mod_1s_2p)
1147 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_2p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [5]) __GMP_ATTRIBUTE_PURE;
1148 #endif
1150 #ifndef mpn_mod_1s_3p_cps /* if not done with cpuvec in a fat binary */
1151 #define mpn_mod_1s_3p_cps __MPN(mod_1s_3p_cps)
1152 __GMP_DECLSPEC void mpn_mod_1s_3p_cps (mp_limb_t [6], mp_limb_t);
1153 #endif
1154 #ifndef mpn_mod_1s_3p /* if not done with cpuvec in a fat binary */
1155 #define mpn_mod_1s_3p __MPN(mod_1s_3p)
1156 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_3p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [6]) __GMP_ATTRIBUTE_PURE;
1157 #endif
1159 #ifndef mpn_mod_1s_4p_cps /* if not done with cpuvec in a fat binary */
1160 #define mpn_mod_1s_4p_cps __MPN(mod_1s_4p_cps)
1161 __GMP_DECLSPEC void mpn_mod_1s_4p_cps (mp_limb_t [7], mp_limb_t);
1162 #endif
1163 #ifndef mpn_mod_1s_4p /* if not done with cpuvec in a fat binary */
1164 #define mpn_mod_1s_4p __MPN(mod_1s_4p)
1165 __GMP_DECLSPEC mp_limb_t mpn_mod_1s_4p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [7]) __GMP_ATTRIBUTE_PURE;
1166 #endif
1168 #define mpn_bc_mulmod_bnm1 __MPN(bc_mulmod_bnm1)
1169 __GMP_DECLSPEC void mpn_bc_mulmod_bnm1 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
1170 #define mpn_mulmod_bnm1 __MPN(mulmod_bnm1)
1171 __GMP_DECLSPEC void mpn_mulmod_bnm1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1172 #define mpn_mulmod_bnm1_next_size __MPN(mulmod_bnm1_next_size)
1173 __GMP_DECLSPEC mp_size_t mpn_mulmod_bnm1_next_size (mp_size_t) ATTRIBUTE_CONST;
1174 static inline mp_size_t
1175 mpn_mulmod_bnm1_itch (mp_size_t rn, mp_size_t an, mp_size_t bn) {
1176 mp_size_t n, itch;
1177 n = rn >> 1;
1178 itch = rn + 4 +
1179 (an > n ? (bn > n ? rn : n) : 0);
1180 return itch;
1181 }
1183 #define mpn_sqrmod_bnm1 __MPN(sqrmod_bnm1)
1184 __GMP_DECLSPEC void mpn_sqrmod_bnm1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1185 #define mpn_sqrmod_bnm1_next_size __MPN(sqrmod_bnm1_next_size)
1186 __GMP_DECLSPEC mp_size_t mpn_sqrmod_bnm1_next_size (mp_size_t) ATTRIBUTE_CONST;
1187 static inline mp_size_t
1188 mpn_sqrmod_bnm1_itch (mp_size_t rn, mp_size_t an) {
1189 mp_size_t n, itch;
1190 n = rn >> 1;
1191 itch = rn + 3 +
1192 (an > n ? an : 0);
1193 return itch;
1194 }
1196 typedef __gmp_randstate_struct *gmp_randstate_ptr;
1197 typedef const __gmp_randstate_struct *gmp_randstate_srcptr;
1199 /* Pseudo-random number generator function pointers structure. */
1200 typedef struct {
1201 void (*randseed_fn) (gmp_randstate_t, mpz_srcptr);
1202 void (*randget_fn) (gmp_randstate_t, mp_ptr, unsigned long int);
1203 void (*randclear_fn) (gmp_randstate_t);
1204 void (*randiset_fn) (gmp_randstate_ptr, gmp_randstate_srcptr);
1205 } gmp_randfnptr_t;
1207 /* Macro to obtain a void pointer to the function pointers structure. */
1208 #define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)
1210 /* Macro to obtain a pointer to the generator's state.
1211 When used as an lvalue the rvalue needs to be cast to mp_ptr. */
1212 #define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)
1214 /* Write a given number of random bits to rp. */
1215 #define _gmp_rand(rp, state, bits) \
1216 do { \
1217 gmp_randstate_ptr __rstate = (state); \
1218 (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn) \
1219 (__rstate, rp, bits); \
1220 } while (0)
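/* For instance (sketch), filling {rp, n} with random limbs in the style of
   mpn_random comes down to
     _gmp_rand (rp, RANDS, n * GMP_NUMB_BITS);
   with RANDS (below) supplying the shared default state. */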
1222 __GMP_DECLSPEC void __gmp_randinit_mt_noseed (gmp_randstate_t);
1225 /* __gmp_rands is the global state for the old-style random functions, and
1226 is also used in the test programs (hence the __GMP_DECLSPEC).
1228 There's no seeding here, so mpz_random etc will generate the same
1229 sequence every time. This is not unlike the C library random functions
1230 if you don't seed them, so perhaps it's acceptable. Digging up a seed
1231 from /dev/random or the like would work on many systems, but might
1232 encourage a false confidence, since it'd be pretty much impossible to do
1233 something that would work reliably everywhere. In any case the new style
1234 functions are recommended to applications which care about randomness, so
1235 the old functions aren't too important. */
1237 __GMP_DECLSPEC extern char __gmp_rands_initialized;
1238 __GMP_DECLSPEC extern gmp_randstate_t __gmp_rands;
1240 #define RANDS \
1241 ((__gmp_rands_initialized ? 0 \
1242 : (__gmp_rands_initialized = 1, \
1243 __gmp_randinit_mt_noseed (__gmp_rands), 0)), \
1244 __gmp_rands)
1246 /* this is used by the test programs, to free memory */
1247 #define RANDS_CLEAR() \
1248 do { \
1249 if (__gmp_rands_initialized) \
1250 { \
1251 __gmp_rands_initialized = 0; \
1252 gmp_randclear (__gmp_rands); \
1253 } \
1254 } while (0)
1257 /* For a threshold between algorithms A and B, size>=thresh is where B
1258 should be used. Special value MP_SIZE_T_MAX means only ever use A, or
1259 value 0 means only ever use B. The tests for these special values will
1260 be compile-time constants, so the compiler should be able to eliminate
1261 the code for the unwanted algorithm. */
1263 #if ! defined (__GNUC__) || __GNUC__ < 2
1264 #define ABOVE_THRESHOLD(size,thresh) \
1265 ((thresh) == 0 \
1266 || ((thresh) != MP_SIZE_T_MAX \
1267 && (size) >= (thresh)))
1268 #else
1269 #define ABOVE_THRESHOLD(size,thresh) \
1270 ((__builtin_constant_p (thresh) && (thresh) == 0) \
1271 || (!(__builtin_constant_p (thresh) && (thresh) == MP_SIZE_T_MAX) \
1272 && (size) >= (thresh)))
1273 #endif
1274 #define BELOW_THRESHOLD(size,thresh) (! ABOVE_THRESHOLD (size, thresh))
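/* A typical use (sketch, FOO_THRESHOLD being a hypothetical tunable):
     if (BELOW_THRESHOLD (n, FOO_THRESHOLD))
       use_basecase_algorithm ();
     else
       use_asymptotically_faster_algorithm ();
   so a threshold of 0 always selects the second branch and MP_SIZE_T_MAX
   always selects the first. */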
1276 #define MPN_TOOM22_MUL_MINSIZE 4
1277 #define MPN_TOOM2_SQR_MINSIZE 4
1279 #define MPN_TOOM33_MUL_MINSIZE 17
1280 #define MPN_TOOM3_SQR_MINSIZE 17
1282 #define MPN_TOOM44_MUL_MINSIZE 30
1283 #define MPN_TOOM4_SQR_MINSIZE 30
1285 #define MPN_TOOM6H_MUL_MINSIZE 46
1286 #define MPN_TOOM6_SQR_MINSIZE 46
1288 #define MPN_TOOM8H_MUL_MINSIZE 86
1289 #define MPN_TOOM8_SQR_MINSIZE 86
1291 #define MPN_TOOM32_MUL_MINSIZE 10
1292 #define MPN_TOOM42_MUL_MINSIZE 10
1293 #define MPN_TOOM43_MUL_MINSIZE 25
1294 #define MPN_TOOM53_MUL_MINSIZE 17
1295 #define MPN_TOOM54_MUL_MINSIZE 31
1296 #define MPN_TOOM63_MUL_MINSIZE 49
1298 #define MPN_TOOM42_MULMID_MINSIZE 4
1300 #define mpn_sqr_diagonal __MPN(sqr_diagonal)
1301 __GMP_DECLSPEC void mpn_sqr_diagonal (mp_ptr, mp_srcptr, mp_size_t);
1303 #define mpn_sqr_diag_addlsh1 __MPN(sqr_diag_addlsh1)
1304 __GMP_DECLSPEC void mpn_sqr_diag_addlsh1 (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t);
1306 #define mpn_toom_interpolate_5pts __MPN(toom_interpolate_5pts)
1307 __GMP_DECLSPEC void mpn_toom_interpolate_5pts (mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_limb_t);
1309 enum toom6_flags {toom6_all_pos = 0, toom6_vm1_neg = 1, toom6_vm2_neg = 2};
1310 #define mpn_toom_interpolate_6pts __MPN(toom_interpolate_6pts)
1311 __GMP_DECLSPEC void mpn_toom_interpolate_6pts (mp_ptr, mp_size_t, enum toom6_flags, mp_ptr, mp_ptr, mp_ptr, mp_size_t);
1313 enum toom7_flags { toom7_w1_neg = 1, toom7_w3_neg = 2 };
1314 #define mpn_toom_interpolate_7pts __MPN(toom_interpolate_7pts)
1315 __GMP_DECLSPEC void mpn_toom_interpolate_7pts (mp_ptr, mp_size_t, enum toom7_flags, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
1317 #define mpn_toom_interpolate_8pts __MPN(toom_interpolate_8pts)
1318 __GMP_DECLSPEC void mpn_toom_interpolate_8pts (mp_ptr, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
1320 #define mpn_toom_interpolate_12pts __MPN(toom_interpolate_12pts)
1321 __GMP_DECLSPEC void mpn_toom_interpolate_12pts (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr);
1323 #define mpn_toom_interpolate_16pts __MPN(toom_interpolate_16pts)
1324 __GMP_DECLSPEC void mpn_toom_interpolate_16pts (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr);
1326 #define mpn_toom_couple_handling __MPN(toom_couple_handling)
1327 __GMP_DECLSPEC void mpn_toom_couple_handling (mp_ptr, mp_size_t, mp_ptr, int, mp_size_t, int, int);
1329 #define mpn_toom_eval_dgr3_pm1 __MPN(toom_eval_dgr3_pm1)
1330 __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1332 #define mpn_toom_eval_dgr3_pm2 __MPN(toom_eval_dgr3_pm2)
1333 __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1335 #define mpn_toom_eval_pm1 __MPN(toom_eval_pm1)
1336 __GMP_DECLSPEC int mpn_toom_eval_pm1 (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1338 #define mpn_toom_eval_pm2 __MPN(toom_eval_pm2)
1339 __GMP_DECLSPEC int mpn_toom_eval_pm2 (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1341 #define mpn_toom_eval_pm2exp __MPN(toom_eval_pm2exp)
1342 __GMP_DECLSPEC int mpn_toom_eval_pm2exp (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr);
1344 #define mpn_toom_eval_pm2rexp __MPN(toom_eval_pm2rexp)
1345 __GMP_DECLSPEC int mpn_toom_eval_pm2rexp (mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr);
1347 #define mpn_toom22_mul __MPN(toom22_mul)
1348 __GMP_DECLSPEC void mpn_toom22_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1350 #define mpn_toom32_mul __MPN(toom32_mul)
1351 __GMP_DECLSPEC void mpn_toom32_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1353 #define mpn_toom42_mul __MPN(toom42_mul)
1354 __GMP_DECLSPEC void mpn_toom42_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1356 #define mpn_toom52_mul __MPN(toom52_mul)
1357 __GMP_DECLSPEC void mpn_toom52_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1359 #define mpn_toom62_mul __MPN(toom62_mul)
1360 __GMP_DECLSPEC void mpn_toom62_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1362 #define mpn_toom2_sqr __MPN(toom2_sqr)
1363 __GMP_DECLSPEC void mpn_toom2_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1365 #define mpn_toom33_mul __MPN(toom33_mul)
1366 __GMP_DECLSPEC void mpn_toom33_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1368 #define mpn_toom43_mul __MPN(toom43_mul)
1369 __GMP_DECLSPEC void mpn_toom43_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1371 #define mpn_toom53_mul __MPN(toom53_mul)
1372 __GMP_DECLSPEC void mpn_toom53_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1374 #define mpn_toom54_mul __MPN(toom54_mul)
1375 __GMP_DECLSPEC void mpn_toom54_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1377 #define mpn_toom63_mul __MPN(toom63_mul)
1378 __GMP_DECLSPEC void mpn_toom63_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1380 #define mpn_toom3_sqr __MPN(toom3_sqr)
1381 __GMP_DECLSPEC void mpn_toom3_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1383 #define mpn_toom44_mul __MPN(toom44_mul)
1384 __GMP_DECLSPEC void mpn_toom44_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1386 #define mpn_toom4_sqr __MPN(toom4_sqr)
1387 __GMP_DECLSPEC void mpn_toom4_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1389 #define mpn_toom6h_mul __MPN(toom6h_mul)
1390 __GMP_DECLSPEC void mpn_toom6h_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1392 #define mpn_toom6_sqr __MPN(toom6_sqr)
1393 __GMP_DECLSPEC void mpn_toom6_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1395 #define mpn_toom8h_mul __MPN(toom8h_mul)
1396 __GMP_DECLSPEC void mpn_toom8h_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1398 #define mpn_toom8_sqr __MPN(toom8_sqr)
1399 __GMP_DECLSPEC void mpn_toom8_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1401 #define mpn_toom42_mulmid __MPN(toom42_mulmid)
1402 __GMP_DECLSPEC void mpn_toom42_mulmid (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
1404 #define mpn_fft_best_k __MPN(fft_best_k)
1405 __GMP_DECLSPEC int mpn_fft_best_k (mp_size_t, int) ATTRIBUTE_CONST;
1407 #define mpn_mul_fft __MPN(mul_fft)
1408 __GMP_DECLSPEC mp_limb_t mpn_mul_fft (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int);
1410 #define mpn_mul_fft_full __MPN(mul_fft_full)
1411 __GMP_DECLSPEC void mpn_mul_fft_full (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1413 #define mpn_nussbaumer_mul __MPN(nussbaumer_mul)
1414 __GMP_DECLSPEC void mpn_nussbaumer_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1416 #define mpn_fft_next_size __MPN(fft_next_size)
1417 __GMP_DECLSPEC mp_size_t mpn_fft_next_size (mp_size_t, int) ATTRIBUTE_CONST;
1419 #define mpn_div_qr_1n_pi1 __MPN(div_qr_1n_pi1)
1420 __GMP_DECLSPEC mp_limb_t mpn_div_qr_1n_pi1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, mp_limb_t);
1422 #define mpn_div_qr_2n_pi1 __MPN(div_qr_2n_pi1)
1423 __GMP_DECLSPEC mp_limb_t mpn_div_qr_2n_pi1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, mp_limb_t);
1425 #define mpn_div_qr_2u_pi1 __MPN(div_qr_2u_pi1)
1426 __GMP_DECLSPEC mp_limb_t mpn_div_qr_2u_pi1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int, mp_limb_t);
1428 #define mpn_sbpi1_div_qr __MPN(sbpi1_div_qr)
1429 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1431 #define mpn_sbpi1_div_q __MPN(sbpi1_div_q)
1432 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1434 #define mpn_sbpi1_divappr_q __MPN(sbpi1_divappr_q)
1435 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_divappr_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1437 #define mpn_dcpi1_div_qr __MPN(dcpi1_div_qr)
1438 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1439 #define mpn_dcpi1_div_qr_n __MPN(dcpi1_div_qr_n)
1440 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr);
1442 #define mpn_dcpi1_div_q __MPN(dcpi1_div_q)
1443 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1445 #define mpn_dcpi1_divappr_q __MPN(dcpi1_divappr_q)
1446 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *);
1447 #define mpn_dcpi1_divappr_q_n __MPN(dcpi1_divappr_q_n)
1448 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr);
1450 #define mpn_mu_div_qr __MPN(mu_div_qr)
1451 __GMP_DECLSPEC mp_limb_t mpn_mu_div_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1452 #define mpn_mu_div_qr_itch __MPN(mu_div_qr_itch)
1453 __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1454 #define mpn_mu_div_qr_choose_in __MPN(mu_div_qr_choose_in)
1455 __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_choose_in (mp_size_t, mp_size_t, int);
1457 #define mpn_preinv_mu_div_qr __MPN(preinv_mu_div_qr)
1458 __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_div_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1459 #define mpn_preinv_mu_div_qr_itch __MPN(preinv_mu_div_qr_itch)
1460 __GMP_DECLSPEC mp_size_t mpn_preinv_mu_div_qr_itch (mp_size_t, mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1462 #define mpn_mu_divappr_q __MPN(mu_divappr_q)
1463 __GMP_DECLSPEC mp_limb_t mpn_mu_divappr_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1464 #define mpn_mu_divappr_q_itch __MPN(mu_divappr_q_itch)
1465 __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1466 #define mpn_mu_divappr_q_choose_in __MPN(mu_divappr_q_choose_in)
1467 __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_choose_in (mp_size_t, mp_size_t, int);
1469 #define mpn_preinv_mu_divappr_q __MPN(preinv_mu_divappr_q)
1470 __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_divappr_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1472 #define mpn_mu_div_q __MPN(mu_div_q)
1473 __GMP_DECLSPEC mp_limb_t mpn_mu_div_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1474 #define mpn_mu_div_q_itch __MPN(mu_div_q_itch)
1475 __GMP_DECLSPEC mp_size_t mpn_mu_div_q_itch (mp_size_t, mp_size_t, int) ATTRIBUTE_CONST;
1477 #define mpn_div_q __MPN(div_q)
1478 __GMP_DECLSPEC void mpn_div_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1480 #define mpn_invert __MPN(invert)
1481 __GMP_DECLSPEC void mpn_invert (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1482 #define mpn_invert_itch(n) mpn_invertappr_itch(n)
1484 #define mpn_ni_invertappr __MPN(ni_invertappr)
1485 __GMP_DECLSPEC mp_limb_t mpn_ni_invertappr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1486 #define mpn_invertappr __MPN(invertappr)
1487 __GMP_DECLSPEC mp_limb_t mpn_invertappr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1488 #define mpn_invertappr_itch(n) (2 * (n))
1490 #define mpn_binvert __MPN(binvert)
1491 __GMP_DECLSPEC void mpn_binvert (mp_ptr, mp_srcptr, mp_size_t, mp_ptr);
1492 #define mpn_binvert_itch __MPN(binvert_itch)
1493 __GMP_DECLSPEC mp_size_t mpn_binvert_itch (mp_size_t) ATTRIBUTE_CONST;
1495 #define mpn_bdiv_q_1 __MPN(bdiv_q_1)
1496 __GMP_DECLSPEC mp_limb_t mpn_bdiv_q_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1498 #define mpn_pi1_bdiv_q_1 __MPN(pi1_bdiv_q_1)
1499 __GMP_DECLSPEC mp_limb_t mpn_pi1_bdiv_q_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int);
1501 #define mpn_sbpi1_bdiv_qr __MPN(sbpi1_bdiv_qr)
1502 __GMP_DECLSPEC mp_limb_t mpn_sbpi1_bdiv_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1504 #define mpn_sbpi1_bdiv_q __MPN(sbpi1_bdiv_q)
1505 __GMP_DECLSPEC void mpn_sbpi1_bdiv_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1507 #define mpn_dcpi1_bdiv_qr __MPN(dcpi1_bdiv_qr)
1508 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1509 #define mpn_dcpi1_bdiv_qr_n_itch __MPN(dcpi1_bdiv_qr_n_itch)
1510 __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_qr_n_itch (mp_size_t) ATTRIBUTE_CONST;
1512 #define mpn_dcpi1_bdiv_qr_n __MPN(dcpi1_bdiv_qr_n)
1513 __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1514 #define mpn_dcpi1_bdiv_q __MPN(dcpi1_bdiv_q)
1515 __GMP_DECLSPEC void mpn_dcpi1_bdiv_q (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t);
1517 #define mpn_dcpi1_bdiv_q_n __MPN(dcpi1_bdiv_q_n)
1518 __GMP_DECLSPEC void mpn_dcpi1_bdiv_q_n (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1519 #define mpn_dcpi1_bdiv_q_n_itch __MPN(dcpi1_bdiv_q_n_itch)
1520 __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_q_n_itch (mp_size_t) ATTRIBUTE_CONST;
1522 #define mpn_mu_bdiv_qr __MPN(mu_bdiv_qr)
1523 __GMP_DECLSPEC mp_limb_t mpn_mu_bdiv_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1524 #define mpn_mu_bdiv_qr_itch __MPN(mu_bdiv_qr_itch)
1525 __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_qr_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1527 #define mpn_mu_bdiv_q __MPN(mu_bdiv_q)
1528 __GMP_DECLSPEC void mpn_mu_bdiv_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1529 #define mpn_mu_bdiv_q_itch __MPN(mu_bdiv_q_itch)
1530 __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_q_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1532 #define mpn_bdiv_qr __MPN(bdiv_qr)
1533 __GMP_DECLSPEC mp_limb_t mpn_bdiv_qr (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1534 #define mpn_bdiv_qr_itch __MPN(bdiv_qr_itch)
1535 __GMP_DECLSPEC mp_size_t mpn_bdiv_qr_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1537 #define mpn_bdiv_q __MPN(bdiv_q)
1538 __GMP_DECLSPEC void mpn_bdiv_q (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1539 #define mpn_bdiv_q_itch __MPN(bdiv_q_itch)
1540 __GMP_DECLSPEC mp_size_t mpn_bdiv_q_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1542 #define mpn_divexact __MPN(divexact)
1543 __GMP_DECLSPEC void mpn_divexact (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t);
1544 #define mpn_divexact_itch __MPN(divexact_itch)
1545 __GMP_DECLSPEC mp_size_t mpn_divexact_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
1547 #ifndef mpn_bdiv_dbm1c /* if not done with cpuvec in a fat binary */
1548 #define mpn_bdiv_dbm1c __MPN(bdiv_dbm1c)
1549 __GMP_DECLSPEC mp_limb_t mpn_bdiv_dbm1c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t);
1550 #endif
1552 #define mpn_bdiv_dbm1(dst, src, size, divisor) \
1553 mpn_bdiv_dbm1c (dst, src, size, divisor, __GMP_CAST (mp_limb_t, 0))
1555 #define mpn_powm __MPN(powm)
1556 __GMP_DECLSPEC void mpn_powm (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
1557 #define mpn_powlo __MPN(powlo)
1558 __GMP_DECLSPEC void mpn_powlo (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr);
1560 #define mpn_sec_pi1_div_qr __MPN(sec_pi1_div_qr)
1561 __GMP_DECLSPEC mp_limb_t mpn_sec_pi1_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1562 #define mpn_sec_pi1_div_r __MPN(sec_pi1_div_r)
1563 __GMP_DECLSPEC void mpn_sec_pi1_div_r (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1566 /* Override mpn_addlsh1_n, mpn_addlsh2_n, mpn_sublsh1_n, etc with mpn_addlsh_n,
1567 etc. when the former are !HAVE_NATIVE_ but the latter are HAVE_NATIVE_. We then lie
1568 and say these macros represent native functions, but leave a trace by using
1569 the value 2 rather than 1. */
1571 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh1_n
1572 #undef mpn_addlsh1_n
1573 #define mpn_addlsh1_n(a,b,c,d) mpn_addlsh_n(a,b,c,d,1)
1574 #define HAVE_NATIVE_mpn_addlsh1_n 2
1575 #endif
1577 #if HAVE_NATIVE_mpn_addlsh_n && ! HAVE_NATIVE_mpn_addlsh2_n
1578 #undef mpn_addlsh2_n
1579 #define mpn_addlsh2_n(a,b,c,d) mpn_addlsh_n(a,b,c,d,2)
1580 #define HAVE_NATIVE_mpn_addlsh2_n 2
1581 #endif
1583 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh1_n
1584 #undef mpn_sublsh1_n
1585 #define mpn_sublsh1_n(a,b,c,d) mpn_sublsh_n(a,b,c,d,1)
1586 #define HAVE_NATIVE_mpn_sublsh1_n 2
1587 #endif
1589 #if HAVE_NATIVE_mpn_sublsh_n && ! HAVE_NATIVE_mpn_sublsh2_n
1590 #undef mpn_sublsh2_n
1591 #define mpn_sublsh2_n(a,b,c,d) mpn_sublsh_n(a,b,c,d,2)
1592 #define HAVE_NATIVE_mpn_sublsh2_n 2
1593 #endif
1595 #if HAVE_NATIVE_mpn_rsblsh_n && ! HAVE_NATIVE_mpn_rsblsh1_n
1596 #undef mpn_rsblsh1_n
1597 #define mpn_rsblsh1_n(a,b,c,d) mpn_rsblsh_n(a,b,c,d,1)
1598 #define HAVE_NATIVE_mpn_rsblsh1_n 2
1599 #endif
1601 #if HAVE_NATIVE_mpn_rsblsh_n && ! HAVE_NATIVE_mpn_rsblsh2_n
1602 #undef mpn_rsblsh2_n
1603 #define mpn_rsblsh2_n(a,b,c,d) mpn_rsblsh_n(a,b,c,d,2)
1604 #define HAVE_NATIVE_mpn_rsblsh2_n 2
1605 #endif
1608 #ifndef DIVEXACT_BY3_METHOD
1609 #if GMP_NUMB_BITS % 2 == 0 && ! defined (HAVE_NATIVE_mpn_divexact_by3c)
1610 #define DIVEXACT_BY3_METHOD 0 /* default to using mpn_bdiv_dbm1c */
1611 #else
1612 #define DIVEXACT_BY3_METHOD 1
1613 #endif
1614 #endif
1616 #if DIVEXACT_BY3_METHOD == 0
1617 #undef mpn_divexact_by3
1618 #define mpn_divexact_by3(dst,src,size) \
1619 (3 & mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3)))
1620 /* override mpn_divexact_by3c defined in gmp.h */
1622 #undef mpn_divexact_by3c
1623 #define mpn_divexact_by3c(dst,src,size,cy) \
1624 (3 & mpn_bdiv_dbm1c (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3), GMP_NUMB_MASK / 3 * cy))
1626 #endif
1628 #if GMP_NUMB_BITS % 4 == 0
1629 #define mpn_divexact_by5(dst,src,size) \
1630 (7 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 5)))
1631 #endif
1633 #if GMP_NUMB_BITS % 3 == 0
1634 #define mpn_divexact_by7(dst,src,size) \
1635 (7 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 7)))
1636 #endif
1638 #if GMP_NUMB_BITS % 6 == 0
1639 #define mpn_divexact_by9(dst,src,size) \
1640 (15 & 7 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 9)))
1641 #endif
1643 #if GMP_NUMB_BITS % 10 == 0
1644 #define mpn_divexact_by11(dst,src,size) \
1645 (15 & 5 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 11)))
1646 #endif
1648 #if GMP_NUMB_BITS % 12 == 0
1649 #define mpn_divexact_by13(dst,src,size) \
1650 (15 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 13)))
1651 #endif
1653 #if GMP_NUMB_BITS % 4 == 0
1654 #define mpn_divexact_by15(dst,src,size) \
1655 (15 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 15)))
1656 #endif
1658 #define mpz_divexact_gcd __gmpz_divexact_gcd
1659 __GMP_DECLSPEC void mpz_divexact_gcd (mpz_ptr, mpz_srcptr, mpz_srcptr);
1661 #define mpz_prodlimbs __gmpz_prodlimbs
1662 __GMP_DECLSPEC mp_size_t mpz_prodlimbs (mpz_ptr, mp_ptr, mp_size_t);
1664 #define mpz_oddfac_1 __gmpz_oddfac_1
1665 __GMP_DECLSPEC void mpz_oddfac_1 (mpz_ptr, mp_limb_t, unsigned);
1667 #define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
1668 #ifdef _GMP_H_HAVE_FILE
1669 __GMP_DECLSPEC size_t mpz_inp_str_nowhite (mpz_ptr, FILE *, int, int, size_t);
1670 #endif
1672 #define mpn_divisible_p __MPN(divisible_p)
1673 __GMP_DECLSPEC int mpn_divisible_p (mp_srcptr, mp_size_t, mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE;
1675 #define mpn_rootrem __MPN(rootrem)
1676 __GMP_DECLSPEC mp_size_t mpn_rootrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1678 #define mpn_broot __MPN(broot)
1679 __GMP_DECLSPEC void mpn_broot (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1681 #define mpn_broot_invm1 __MPN(broot_invm1)
1682 __GMP_DECLSPEC void mpn_broot_invm1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
1684 #define mpn_brootinv __MPN(brootinv)
1685 __GMP_DECLSPEC void mpn_brootinv (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr);
1687 #define mpn_bsqrt __MPN(bsqrt)
1688 __GMP_DECLSPEC void mpn_bsqrt (mp_ptr, mp_srcptr, mp_bitcnt_t, mp_ptr);
1690 #define mpn_bsqrtinv __MPN(bsqrtinv)
1691 __GMP_DECLSPEC int mpn_bsqrtinv (mp_ptr, mp_srcptr, mp_bitcnt_t, mp_ptr);
1693 #if defined (_CRAY)
1694 #define MPN_COPY_INCR(dst, src, n) \
1695 do { \
1696 int __i; /* Faster on some Crays with plain int */ \
1697 _Pragma ("_CRI ivdep"); \
1698 for (__i = 0; __i < (n); __i++) \
1699 (dst)[__i] = (src)[__i]; \
1700 } while (0)
1701 #endif
1703 /* used by test programs, hence __GMP_DECLSPEC */
1704 #ifndef mpn_copyi /* if not done with cpuvec in a fat binary */
1705 #define mpn_copyi __MPN(copyi)
1706 __GMP_DECLSPEC void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t);
1707 #endif
1709 #if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
1710 #define MPN_COPY_INCR(dst, src, size) \
1711 do { \
1712 ASSERT ((size) >= 0); \
1713 ASSERT (MPN_SAME_OR_INCR_P (dst, src, size)); \
1714 mpn_copyi (dst, src, size); \
1715 } while (0)
1716 #endif
1718 /* Copy N limbs from SRC to DST incrementing, N==0 allowed. */
1719 #if ! defined (MPN_COPY_INCR)
1720 #define MPN_COPY_INCR(dst, src, n) \
1721 do { \
1722 ASSERT ((n) >= 0); \
1723 ASSERT (MPN_SAME_OR_INCR_P (dst, src, n)); \
1724 if ((n) != 0) \
1725 { \
1726 mp_size_t __n = (n) - 1; \
1727 mp_ptr __dst = (dst); \
1728 mp_srcptr __src = (src); \
1729 mp_limb_t __x; \
1730 __x = *__src++; \
1731 if (__n != 0) \
1732 { \
1733 do \
1734 { \
1735 *__dst++ = __x; \
1736 __x = *__src++; \
1737 } \
1738 while (--__n); \
1739 } \
1740 *__dst++ = __x; \
1741 } \
1742 } while (0)
1743 #endif
1746 #if defined (_CRAY)
1747 #define MPN_COPY_DECR(dst, src, n) \
1748 do { \
1749 int __i; /* Faster on some Crays with plain int */ \
1750 _Pragma ("_CRI ivdep"); \
1751 for (__i = (n) - 1; __i >= 0; __i--) \
1752 (dst)[__i] = (src)[__i]; \
1753 } while (0)
1754 #endif
1756 /* used by test programs, hence __GMP_DECLSPEC */
1757 #ifndef mpn_copyd /* if not done with cpuvec in a fat binary */
1758 #define mpn_copyd __MPN(copyd)
1759 __GMP_DECLSPEC void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t);
1760 #endif
1762 #if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
1763 #define MPN_COPY_DECR(dst, src, size) \
1764 do { \
1765 ASSERT ((size) >= 0); \
1766 ASSERT (MPN_SAME_OR_DECR_P (dst, src, size)); \
1767 mpn_copyd (dst, src, size); \
1768 } while (0)
1769 #endif
1771 /* Copy N limbs from SRC to DST decrementing, N==0 allowed. */
1772 #if ! defined (MPN_COPY_DECR)
1773 #define MPN_COPY_DECR(dst, src, n) \
1774 do { \
1775 ASSERT ((n) >= 0); \
1776 ASSERT (MPN_SAME_OR_DECR_P (dst, src, n)); \
1777 if ((n) != 0) \
1778 { \
1779 mp_size_t __n = (n) - 1; \
1780 mp_ptr __dst = (dst) + __n; \
1781 mp_srcptr __src = (src) + __n; \
1782 mp_limb_t __x; \
1783 __x = *__src--; \
1784 if (__n != 0) \
1785 { \
1786 do \
1787 { \
1788 *__dst-- = __x; \
1789 __x = *__src--; \
1790 } \
1791 while (--__n); \
1792 } \
1793 *__dst-- = __x; \
1794 } \
1795 } while (0)
1796 #endif
1799 #ifndef MPN_COPY
1800 #define MPN_COPY(d,s,n) \
1801 do { \
1802 ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n)); \
1803 MPN_COPY_INCR (d, s, n); \
1804 } while (0)
1805 #endif
1808 /* Set {dst,size} to the limbs of {src,size} in reverse order. */
1809 #define MPN_REVERSE(dst, src, size) \
1810 do { \
1811 mp_ptr __dst = (dst); \
1812 mp_size_t __size = (size); \
1813 mp_srcptr __src = (src) + __size - 1; \
1814 mp_size_t __i; \
1815 ASSERT ((size) >= 0); \
1816 ASSERT (! MPN_OVERLAP_P (dst, size, src, size)); \
1817 CRAY_Pragma ("_CRI ivdep"); \
1818 for (__i = 0; __i < __size; __i++) \
1819 { \
1820 *__dst = *__src; \
1821 __dst++; \
1822 __src--; \
1823 } \
1824 } while (0)
1827 /* Zero n limbs at dst.
1829 For power and powerpc we want an inline stu/bdnz loop for zeroing. On
1830 ppc630 for instance this is optimal since it can sustain only 1 store per
1831 cycle.
1833 gcc 2.95.x (for powerpc64 -maix64, or powerpc32) doesn't recognise the
1834 "for" loop in the generic code below can become stu/bdnz. The do/while
1835 here helps it get to that. The same caveat about plain -mpowerpc64 mode
1836 applies here as to __GMPN_COPY_INCR in gmp.h.
1838 xlc 3.1 already generates stu/bdnz from the generic C, and does so from
1839 this loop too.
1841 Enhancement: GLIBC does some trickery with dcbz to zero whole cache lines
1842 at a time. MPN_ZERO isn't all that important in GMP, so it might be more
1843 trouble than it's worth to do the same, though perhaps a call to memset
1844 would be good when on a GNU system. */
1846 #if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
1847 #define MPN_FILL(dst, n, f) \
1848 do { \
1849 mp_ptr __dst = (dst) - 1; \
1850 mp_size_t __n = (n); \
1851 ASSERT (__n > 0); \
1852 do \
1853 *++__dst = (f); \
1854 while (--__n); \
1855 } while (0)
1856 #endif
1858 #ifndef MPN_FILL
1859 #define MPN_FILL(dst, n, f) \
1860 do { \
1861 mp_ptr __dst = (dst); \
1862 mp_size_t __n = (n); \
1863 ASSERT (__n > 0); \
1864 do \
1865 *__dst++ = (f); \
1866 while (--__n); \
1867 } while (0)
1868 #endif
1870 #define MPN_ZERO(dst, n) \
1871 do { \
1872 ASSERT ((n) >= 0); \
1873 if ((n) != 0) \
1874 MPN_FILL (dst, n, CNST_LIMB (0)); \
1875 } while (0)
1877 /* On the x86s repe/scasl doesn't seem useful, since it takes many cycles to
1878 start up and would need to strip a lot of zeros before it'd be faster
1879 than a simple cmpl loop. Here are some times in cycles for
1880 std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
1881 low zeros).
1883 std cld
1884 P5 18 16
1885 P6 46 38
1886 K6 36 13
1887 K7 21 20
1888 */
1889 #ifndef MPN_NORMALIZE
1890 #define MPN_NORMALIZE(DST, NLIMBS) \
1891 do { \
1892 while ((NLIMBS) > 0) \
1893 { \
1894 if ((DST)[(NLIMBS) - 1] != 0) \
1895 break; \
1896 (NLIMBS)--; \
1897 } \
1898 } while (0)
1899 #endif
1900 #ifndef MPN_NORMALIZE_NOT_ZERO
1901 #define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS) \
1902 do { \
1903 while (1) \
1904 { \
1905 ASSERT ((NLIMBS) >= 1); \
1906 if ((DST)[(NLIMBS) - 1] != 0) \
1907 break; \
1908 (NLIMBS)--; \
1909 } \
1910 } while (0)
1911 #endif
1913 /* Strip least significant zero limbs from {ptr,size} by incrementing ptr
1914 and decrementing size. low should be ptr[0], and will be the new ptr[0]
1915 on returning. The number in {ptr,size} must be non-zero, ie. size!=0 and
1916 somewhere a non-zero limb. */
1917 #define MPN_STRIP_LOW_ZEROS_NOT_ZERO(ptr, size, low) \
1918 do { \
1919 ASSERT ((size) >= 1); \
1920 ASSERT ((low) == (ptr)[0]); \
1922 while ((low) == 0) \
1923 { \
1924 (size)--; \
1925 ASSERT ((size) >= 1); \
1926 (ptr)++; \
1927 (low) = *(ptr); \
1928 } \
1929 } while (0)
1931 /* Initialize X of type mpz_t with space for NLIMBS limbs. X should be a
1932 temporary variable; it will be automatically cleared out at function
1933 return. We use __x here to make it possible to accept both mpz_ptr and
1934 mpz_t arguments. */
1935 #define MPZ_TMP_INIT(X, NLIMBS) \
1936 do { \
1937 mpz_ptr __x = (X); \
1938 ASSERT ((NLIMBS) >= 1); \
1939 __x->_mp_alloc = (NLIMBS); \
1940 __x->_mp_d = TMP_ALLOC_LIMBS (NLIMBS); \
1941 } while (0)
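/* A typical use (illustrative sketch only, not code from this file): a
   routine needing a scratch mpz_t of at most n+1 limbs can do

     mpz_t t;
     TMP_DECL;
     TMP_MARK;
     MPZ_TMP_INIT (t, n + 1);
     ... use t ...
     TMP_FREE;

   i.e. no mpz_clear; the limbs vanish with the TMP allocation.  */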
1943 #if WANT_ASSERT
1944 static inline void *
1945 _mpz_newalloc (mpz_ptr z, mp_size_t n)
1946 {
1947 void * res = _mpz_realloc(z,n);
1948 /* If we are checking the code, force a random change to limbs. */
1949 ((mp_ptr) res)[0] = ~ ((mp_ptr) res)[ALLOC (z) - 1];
1950 return res;
1951 }
1952 #else
1953 #define _mpz_newalloc _mpz_realloc
1954 #endif
1955 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs. */
1956 #define MPZ_REALLOC(z,n) (UNLIKELY ((n) > ALLOC(z)) \
1957 ? (mp_ptr) _mpz_realloc(z,n) \
1958 : PTR(z))
1959 #define MPZ_NEWALLOC(z,n) (UNLIKELY ((n) > ALLOC(z)) \
1960 ? (mp_ptr) _mpz_newalloc(z,n) \
1961 : PTR(z))
1963 #define MPZ_EQUAL_1_P(z) (SIZ(z)==1 && PTR(z)[0] == 1)
1966 /* MPN_FIB2_SIZE(n) is the size in limbs required by mpn_fib2_ui for fp and
1967 f1p.
1969 From Knuth vol 1 section 1.2.8, F[n] = phi^n/sqrt(5) rounded to the
1970 nearest integer, where phi=(1+sqrt(5))/2 is the golden ratio. So the
1971 number of bits required is n*log_2((1+sqrt(5))/2) = n*0.6942419.
1973 The multiplier used is 23/32=0.71875 for efficient calculation on CPUs
1974 without good floating point. There's +2 for rounding up, and a further
1975 +2 since at the last step x limbs are doubled into a 2x+1 limb region
1976 whereas the actual F[2k] value might be only 2x-1 limbs.
1978 Note that a division is done first, since on a 32-bit system it's at
1979 least conceivable to go right up to n==ULONG_MAX. (F[2^32-1] would be
1980 about 380Mbytes, plus temporary workspace of about 1.2Gbytes here and
1981 whatever a multiply of two 190Mbyte numbers takes.)
1983 Enhancement: When GMP_NUMB_BITS is not a power of 2 the division could be
1984 worked into the multiplier. */
1986 #define MPN_FIB2_SIZE(n) \
1987 ((mp_size_t) ((n) / 32 * 23 / GMP_NUMB_BITS) + 4)
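/* Worked example (illustrative only, 64-bit limbs): MPN_FIB2_SIZE(1000)
   = 1000/32*23/64 + 4 = 713/64 + 4 = 15 limbs, comfortably above the
   roughly 1000*0.6942/64 = 11 limbs that F[1000] itself needs.  */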
1990 /* FIB_TABLE(n) returns the Fibonacci number F[n]. Must have n in the range
1991 -1 <= n <= FIB_TABLE_LIMIT (that constant in fib_table.h).
1993 FIB_TABLE_LUCNUM_LIMIT (in fib_table.h) is the largest n for which L[n] =
1994 F[n] + 2*F[n-1] fits in a limb. */
1996 __GMP_DECLSPEC extern const mp_limb_t __gmp_fib_table[];
1997 #define FIB_TABLE(n) (__gmp_fib_table[(n)+1])
1999 extern const mp_limb_t __gmp_oddfac_table[];
2000 extern const mp_limb_t __gmp_odd2fac_table[];
2001 extern const unsigned char __gmp_fac2cnt_table[];
2002 extern const mp_limb_t __gmp_limbroots_table[];
2004 /* n^log <= GMP_NUMB_MAX, a limb can store log factors less than n */
2005 static inline unsigned
2006 log_n_max (mp_limb_t n)
2007 {
2008 unsigned log;
2009 for (log = 8; n > __gmp_limbroots_table[log - 1]; log--);
2010 return log;
2011 }
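/* For instance (illustrative only, 64-bit limbs, no nails):
   log_n_max (1000) comes out as 6, since 1000^6 fits below 2^64 but
   1000^7 does not, so a limb can hold at most 6 factors of 1000.  */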
2013 #define SIEVESIZE 512 /* FIXME: Allow gmp_init_primesieve to choose */
2014 typedef struct
2015 {
2016 unsigned long d; /* current index in s[] */
2017 unsigned long s0; /* number corresponding to s[0] */
2018 unsigned long sqrt_s0; /* misnomer for sqrt(s[SIEVESIZE-1]) */
2019 unsigned char s[SIEVESIZE + 1]; /* sieve table */
2020 } gmp_primesieve_t;
2022 #define gmp_init_primesieve __gmp_init_primesieve
2023 __GMP_DECLSPEC void gmp_init_primesieve (gmp_primesieve_t *);
2025 #define gmp_nextprime __gmp_nextprime
2026 __GMP_DECLSPEC unsigned long int gmp_nextprime (gmp_primesieve_t *);
2028 #define gmp_primesieve __gmp_primesieve
2029 __GMP_DECLSPEC mp_limb_t gmp_primesieve (mp_ptr, mp_limb_t);
2032 #ifndef MUL_TOOM22_THRESHOLD
2033 #define MUL_TOOM22_THRESHOLD 30
2034 #endif
2036 #ifndef MUL_TOOM33_THRESHOLD
2037 #define MUL_TOOM33_THRESHOLD 100
2038 #endif
2040 #ifndef MUL_TOOM44_THRESHOLD
2041 #define MUL_TOOM44_THRESHOLD 300
2042 #endif
2044 #ifndef MUL_TOOM6H_THRESHOLD
2045 #define MUL_TOOM6H_THRESHOLD 350
2046 #endif
2048 #ifndef SQR_TOOM6_THRESHOLD
2049 #define SQR_TOOM6_THRESHOLD MUL_TOOM6H_THRESHOLD
2050 #endif
2052 #ifndef MUL_TOOM8H_THRESHOLD
2053 #define MUL_TOOM8H_THRESHOLD 450
2054 #endif
2056 #ifndef SQR_TOOM8_THRESHOLD
2057 #define SQR_TOOM8_THRESHOLD MUL_TOOM8H_THRESHOLD
2058 #endif
2060 #ifndef MUL_TOOM32_TO_TOOM43_THRESHOLD
2061 #define MUL_TOOM32_TO_TOOM43_THRESHOLD 100
2062 #endif
2064 #ifndef MUL_TOOM32_TO_TOOM53_THRESHOLD
2065 #define MUL_TOOM32_TO_TOOM53_THRESHOLD 110
2066 #endif
2068 #ifndef MUL_TOOM42_TO_TOOM53_THRESHOLD
2069 #define MUL_TOOM42_TO_TOOM53_THRESHOLD 100
2070 #endif
2072 #ifndef MUL_TOOM42_TO_TOOM63_THRESHOLD
2073 #define MUL_TOOM42_TO_TOOM63_THRESHOLD 110
2074 #endif
2076 #ifndef MUL_TOOM43_TO_TOOM54_THRESHOLD
2077 #define MUL_TOOM43_TO_TOOM54_THRESHOLD 150
2078 #endif
2080 /* MUL_TOOM22_THRESHOLD_LIMIT is the maximum for MUL_TOOM22_THRESHOLD. In a
2081 normal build MUL_TOOM22_THRESHOLD is a constant and we use that. In a fat
2082 binary or tune program build MUL_TOOM22_THRESHOLD is a variable and a
2083 separate hard limit will have been defined. Similarly for TOOM3. */
2084 #ifndef MUL_TOOM22_THRESHOLD_LIMIT
2085 #define MUL_TOOM22_THRESHOLD_LIMIT MUL_TOOM22_THRESHOLD
2086 #endif
2087 #ifndef MUL_TOOM33_THRESHOLD_LIMIT
2088 #define MUL_TOOM33_THRESHOLD_LIMIT MUL_TOOM33_THRESHOLD
2089 #endif
2090 #ifndef MULLO_BASECASE_THRESHOLD_LIMIT
2091 #define MULLO_BASECASE_THRESHOLD_LIMIT MULLO_BASECASE_THRESHOLD
2092 #endif
2093 #ifndef SQRLO_BASECASE_THRESHOLD_LIMIT
2094 #define SQRLO_BASECASE_THRESHOLD_LIMIT SQRLO_BASECASE_THRESHOLD
2095 #endif
2096 #ifndef SQRLO_DC_THRESHOLD_LIMIT
2097 #define SQRLO_DC_THRESHOLD_LIMIT SQRLO_DC_THRESHOLD
2098 #endif
2100 /* SQR_BASECASE_THRESHOLD is where mpn_sqr_basecase should take over from
2101 mpn_mul_basecase. Default is to use mpn_sqr_basecase from 0. (Note that we
2102 certainly always want it if there's a native assembler mpn_sqr_basecase.)
2104 If it turns out that mpn_toom2_sqr becomes faster than mpn_mul_basecase
2105 before mpn_sqr_basecase does, then SQR_BASECASE_THRESHOLD is the toom2
2106 threshold and SQR_TOOM2_THRESHOLD is 0. This oddity arises more or less
2107 because SQR_TOOM2_THRESHOLD represents the size up to which mpn_sqr_basecase
2108 should be used, and that may be never. */
2110 #ifndef SQR_BASECASE_THRESHOLD
2111 #define SQR_BASECASE_THRESHOLD 0 /* never use mpn_mul_basecase */
2112 #endif
2114 #ifndef SQR_TOOM2_THRESHOLD
2115 #define SQR_TOOM2_THRESHOLD 50
2116 #endif
2118 #ifndef SQR_TOOM3_THRESHOLD
2119 #define SQR_TOOM3_THRESHOLD 120
2120 #endif
2122 #ifndef SQR_TOOM4_THRESHOLD
2123 #define SQR_TOOM4_THRESHOLD 400
2124 #endif
2126 /* See comments above about MUL_TOOM33_THRESHOLD_LIMIT. */
2127 #ifndef SQR_TOOM3_THRESHOLD_LIMIT
2128 #define SQR_TOOM3_THRESHOLD_LIMIT SQR_TOOM3_THRESHOLD
2129 #endif
2131 #ifndef MULMID_TOOM42_THRESHOLD
2132 #define MULMID_TOOM42_THRESHOLD MUL_TOOM22_THRESHOLD
2133 #endif
2135 #ifndef MULLO_BASECASE_THRESHOLD
2136 #define MULLO_BASECASE_THRESHOLD 0 /* never use mpn_mul_basecase */
2137 #endif
2139 #ifndef MULLO_DC_THRESHOLD
2140 #define MULLO_DC_THRESHOLD (2*MUL_TOOM22_THRESHOLD)
2141 #endif
2143 #ifndef MULLO_MUL_N_THRESHOLD
2144 #define MULLO_MUL_N_THRESHOLD (2*MUL_FFT_THRESHOLD)
2145 #endif
2147 #ifndef SQRLO_BASECASE_THRESHOLD
2148 #define SQRLO_BASECASE_THRESHOLD 0 /* never use mpn_sqr_basecase */
2149 #endif
2151 #ifndef SQRLO_DC_THRESHOLD
2152 #define SQRLO_DC_THRESHOLD (MULLO_DC_THRESHOLD)
2153 #endif
2155 #ifndef SQRLO_SQR_THRESHOLD
2156 #define SQRLO_SQR_THRESHOLD (MULLO_MUL_N_THRESHOLD)
2157 #endif
2159 #ifndef DC_DIV_QR_THRESHOLD
2160 #define DC_DIV_QR_THRESHOLD (2*MUL_TOOM22_THRESHOLD)
2161 #endif
2163 #ifndef DC_DIVAPPR_Q_THRESHOLD
2164 #define DC_DIVAPPR_Q_THRESHOLD 200
2165 #endif
2167 #ifndef DC_BDIV_QR_THRESHOLD
2168 #define DC_BDIV_QR_THRESHOLD (2*MUL_TOOM22_THRESHOLD)
2169 #endif
2171 #ifndef DC_BDIV_Q_THRESHOLD
2172 #define DC_BDIV_Q_THRESHOLD 180
2173 #endif
2175 #ifndef DIVEXACT_JEB_THRESHOLD
2176 #define DIVEXACT_JEB_THRESHOLD 25
2177 #endif
2179 #ifndef INV_MULMOD_BNM1_THRESHOLD
2180 #define INV_MULMOD_BNM1_THRESHOLD (4*MULMOD_BNM1_THRESHOLD)
2181 #endif
2183 #ifndef INV_APPR_THRESHOLD
2184 #define INV_APPR_THRESHOLD INV_NEWTON_THRESHOLD
2185 #endif
2187 #ifndef INV_NEWTON_THRESHOLD
2188 #define INV_NEWTON_THRESHOLD 200
2189 #endif
2191 #ifndef BINV_NEWTON_THRESHOLD
2192 #define BINV_NEWTON_THRESHOLD 300
2193 #endif
2195 #ifndef MU_DIVAPPR_Q_THRESHOLD
2196 #define MU_DIVAPPR_Q_THRESHOLD 2000
2197 #endif
2199 #ifndef MU_DIV_QR_THRESHOLD
2200 #define MU_DIV_QR_THRESHOLD 2000
2201 #endif
2203 #ifndef MUPI_DIV_QR_THRESHOLD
2204 #define MUPI_DIV_QR_THRESHOLD 200
2205 #endif
2207 #ifndef MU_BDIV_Q_THRESHOLD
2208 #define MU_BDIV_Q_THRESHOLD 2000
2209 #endif
2211 #ifndef MU_BDIV_QR_THRESHOLD
2212 #define MU_BDIV_QR_THRESHOLD 2000
2213 #endif
2215 #ifndef MULMOD_BNM1_THRESHOLD
2216 #define MULMOD_BNM1_THRESHOLD 16
2217 #endif
2219 #ifndef SQRMOD_BNM1_THRESHOLD
2220 #define SQRMOD_BNM1_THRESHOLD 16
2221 #endif
2223 #ifndef MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD
2224 #define MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD (INV_MULMOD_BNM1_THRESHOLD/2)
2225 #endif
2227 #if HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2
2229 #ifndef REDC_1_TO_REDC_2_THRESHOLD
2230 #define REDC_1_TO_REDC_2_THRESHOLD 15
2231 #endif
2232 #ifndef REDC_2_TO_REDC_N_THRESHOLD
2233 #define REDC_2_TO_REDC_N_THRESHOLD 100
2234 #endif
2236 #else
2238 #ifndef REDC_1_TO_REDC_N_THRESHOLD
2239 #define REDC_1_TO_REDC_N_THRESHOLD 100
2240 #endif
2242 #endif /* HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2 */
2245 /* First k to use for an FFT modF multiply. A modF FFT is an order
2246 log(2^k)/log(2^(k-1)) algorithm, so k=3 is merely 1.5 like karatsuba,
2247 whereas k=4 is 1.33 which is faster than toom3 at 1.485. */
2248 #define FFT_FIRST_K 4
2250 /* Threshold at which FFT should be used to do a modF NxN -> N multiply. */
2251 #ifndef MUL_FFT_MODF_THRESHOLD
2252 #define MUL_FFT_MODF_THRESHOLD (MUL_TOOM33_THRESHOLD * 3)
2253 #endif
2254 #ifndef SQR_FFT_MODF_THRESHOLD
2255 #define SQR_FFT_MODF_THRESHOLD (SQR_TOOM3_THRESHOLD * 3)
2256 #endif
2258 /* Threshold at which FFT should be used to do an NxN -> 2N multiply. This
2259 will be a size where FFT is using k=7 or k=8, since an FFT-k used for an
2260 NxN->2N multiply and not recursing into itself is an order
2261 log(2^k)/log(2^(k-2)) algorithm, so it'll be at least k=7 at 1.39 which
2262 is the first better than toom3. */
2263 #ifndef MUL_FFT_THRESHOLD
2264 #define MUL_FFT_THRESHOLD (MUL_FFT_MODF_THRESHOLD * 10)
2265 #endif
2266 #ifndef SQR_FFT_THRESHOLD
2267 #define SQR_FFT_THRESHOLD (SQR_FFT_MODF_THRESHOLD * 10)
2268 #endif
2270 /* Table of thresholds for successive modF FFT "k"s. The first entry is
2271 where FFT_FIRST_K+1 should be used, the second FFT_FIRST_K+2,
2272 etc. See mpn_fft_best_k(). */
2273 #ifndef MUL_FFT_TABLE
2274 #define MUL_FFT_TABLE \
2275 { MUL_TOOM33_THRESHOLD * 4, /* k=5 */ \
2276 MUL_TOOM33_THRESHOLD * 8, /* k=6 */ \
2277 MUL_TOOM33_THRESHOLD * 16, /* k=7 */ \
2278 MUL_TOOM33_THRESHOLD * 32, /* k=8 */ \
2279 MUL_TOOM33_THRESHOLD * 96, /* k=9 */ \
2280 MUL_TOOM33_THRESHOLD * 288, /* k=10 */ \
2281 0 }
2282 #endif
2283 #ifndef SQR_FFT_TABLE
2284 #define SQR_FFT_TABLE \
2285 { SQR_TOOM3_THRESHOLD * 4, /* k=5 */ \
2286 SQR_TOOM3_THRESHOLD * 8, /* k=6 */ \
2287 SQR_TOOM3_THRESHOLD * 16, /* k=7 */ \
2288 SQR_TOOM3_THRESHOLD * 32, /* k=8 */ \
2289 SQR_TOOM3_THRESHOLD * 96, /* k=9 */ \
2290 SQR_TOOM3_THRESHOLD * 288, /* k=10 */ \
2291 0 }
2292 #endif
2294 struct fft_table_nk
2295 {
2296 unsigned int n:27;
2297 unsigned int k:5;
2298 };
2300 #ifndef FFT_TABLE_ATTRS
2301 #define FFT_TABLE_ATTRS static const
2302 #endif
2304 #define MPN_FFT_TABLE_SIZE 16
2307 #ifndef DC_DIV_QR_THRESHOLD
2308 #define DC_DIV_QR_THRESHOLD (3 * MUL_TOOM22_THRESHOLD)
2309 #endif
2311 #ifndef GET_STR_DC_THRESHOLD
2312 #define GET_STR_DC_THRESHOLD 18
2313 #endif
2315 #ifndef GET_STR_PRECOMPUTE_THRESHOLD
2316 #define GET_STR_PRECOMPUTE_THRESHOLD 35
2317 #endif
2319 #ifndef SET_STR_DC_THRESHOLD
2320 #define SET_STR_DC_THRESHOLD 750
2321 #endif
2323 #ifndef SET_STR_PRECOMPUTE_THRESHOLD
2324 #define SET_STR_PRECOMPUTE_THRESHOLD 2000
2325 #endif
2327 #ifndef FAC_ODD_THRESHOLD
2328 #define FAC_ODD_THRESHOLD 35
2329 #endif
2331 #ifndef FAC_DSC_THRESHOLD
2332 #define FAC_DSC_THRESHOLD 400
2333 #endif
2335 /* Return non-zero if xp,xsize and yp,ysize overlap.
2336 If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
2337 overlap. If both these are false, there's an overlap. */
2338 #define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
2339 ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
2340 #define MEM_OVERLAP_P(xp, xsize, yp, ysize) \
2341 ( (char *) (xp) + (xsize) > (char *) (yp) \
2342 && (char *) (yp) + (ysize) > (char *) (xp))
2344 /* Return non-zero if xp,xsize and yp,ysize are either identical or not
2345 overlapping. Return zero if they're partially overlapping. */
2346 #define MPN_SAME_OR_SEPARATE_P(xp, yp, size) \
2347 MPN_SAME_OR_SEPARATE2_P(xp, size, yp, size)
2348 #define MPN_SAME_OR_SEPARATE2_P(xp, xsize, yp, ysize) \
2349 ((xp) == (yp) || ! MPN_OVERLAP_P (xp, xsize, yp, ysize))
2351 /* Return non-zero if dst,dsize and src,ssize are either identical or
2352 overlapping in a way suitable for an incrementing/decrementing algorithm.
2353 Return zero if they're partially overlapping in an unsuitable fashion. */
2354 #define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize) \
2355 ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
2356 #define MPN_SAME_OR_INCR_P(dst, src, size) \
2357 MPN_SAME_OR_INCR2_P(dst, size, src, size)
2358 #define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize) \
2359 ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
2360 #define MPN_SAME_OR_DECR_P(dst, src, size) \
2361 MPN_SAME_OR_DECR2_P(dst, size, src, size)
2364 /* ASSERT() is a private assertion checking scheme, similar to <assert.h>.
2365 ASSERT() does the check only if WANT_ASSERT is selected, ASSERT_ALWAYS()
2366 does it always. Generally assertions are meant for development, but
2367 might help when looking for a problem later too. */
2369 #ifdef __LINE__
2370 #define ASSERT_LINE __LINE__
2371 #else
2372 #define ASSERT_LINE -1
2373 #endif
2375 #ifdef __FILE__
2376 #define ASSERT_FILE __FILE__
2377 #else
2378 #define ASSERT_FILE ""
2379 #endif
2381 __GMP_DECLSPEC void __gmp_assert_header (const char *, int);
2382 __GMP_DECLSPEC void __gmp_assert_fail (const char *, int, const char *) ATTRIBUTE_NORETURN;
2384 #define ASSERT_FAIL(expr) __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, #expr)
2386 #define ASSERT_ALWAYS(expr) \
2387 do { \
2388 if (UNLIKELY (!(expr))) \
2389 ASSERT_FAIL (expr); \
2390 } while (0)
2392 #if WANT_ASSERT
2393 #define ASSERT(expr) ASSERT_ALWAYS (expr)
2394 #else
2395 #define ASSERT(expr) do {} while (0)
2396 #endif
2399 /* ASSERT_CARRY checks the expression is non-zero, and ASSERT_NOCARRY checks
2400 that it's zero. In both cases if assertion checking is disabled the
2401 expression is still evaluated. These macros are meant for use with
2402 routines like mpn_add_n() where the return value represents a carry or
2403 whatever that should or shouldn't occur in some context. For example,
2404 ASSERT_NOCARRY (mpn_add_n (rp, s1p, s2p, size)); */
2405 #if WANT_ASSERT
2406 #define ASSERT_CARRY(expr) ASSERT_ALWAYS ((expr) != 0)
2407 #define ASSERT_NOCARRY(expr) ASSERT_ALWAYS ((expr) == 0)
2408 #else
2409 #define ASSERT_CARRY(expr) (expr)
2410 #define ASSERT_NOCARRY(expr) (expr)
2411 #endif
2414 /* ASSERT_CODE includes code when assertion checking is wanted. This is the
2415 same as writing "#if WANT_ASSERT", but more compact. */
2416 #if WANT_ASSERT
2417 #define ASSERT_CODE(expr) expr
2418 #else
2419 #define ASSERT_CODE(expr)
2420 #endif
2423 /* Test that an mpq_t is in fully canonical form. This can be used as
2424 protection on routines like mpq_equal which give wrong results on
2425 non-canonical inputs. */
2426 #if WANT_ASSERT
2427 #define ASSERT_MPQ_CANONICAL(q) \
2428 do { \
2429 ASSERT (q->_mp_den._mp_size > 0); \
2430 if (q->_mp_num._mp_size == 0) \
2431 { \
2432 /* zero should be 0/1 */ \
2433 ASSERT (mpz_cmp_ui (mpq_denref(q), 1L) == 0); \
2434 } \
2435 else \
2436 { \
2437 /* no common factors */ \
2438 mpz_t __g; \
2439 mpz_init (__g); \
2440 mpz_gcd (__g, mpq_numref(q), mpq_denref(q)); \
2441 ASSERT (mpz_cmp_ui (__g, 1) == 0); \
2442 mpz_clear (__g); \
2443 } \
2444 } while (0)
2445 #else
2446 #define ASSERT_MPQ_CANONICAL(q) do {} while (0)
2447 #endif
2449 /* Check that the nail parts are zero. */
2450 #define ASSERT_ALWAYS_LIMB(limb) \
2451 do { \
2452 mp_limb_t __nail = (limb) & GMP_NAIL_MASK; \
2453 ASSERT_ALWAYS (__nail == 0); \
2454 } while (0)
2455 #define ASSERT_ALWAYS_MPN(ptr, size) \
2456 do { \
2457 /* let whole loop go dead when no nails */ \
2458 if (GMP_NAIL_BITS != 0) \
2459 { \
2460 mp_size_t __i; \
2461 for (__i = 0; __i < (size); __i++) \
2462 ASSERT_ALWAYS_LIMB ((ptr)[__i]); \
2463 } \
2464 } while (0)
2465 #if WANT_ASSERT
2466 #define ASSERT_LIMB(limb) ASSERT_ALWAYS_LIMB (limb)
2467 #define ASSERT_MPN(ptr, size) ASSERT_ALWAYS_MPN (ptr, size)
2468 #else
2469 #define ASSERT_LIMB(limb) do {} while (0)
2470 #define ASSERT_MPN(ptr, size) do {} while (0)
2471 #endif
2474 /* Assert that an mpn region {ptr,size} is zero, or non-zero.
2475 size==0 is allowed, and in that case {ptr,size} is considered to be zero. */
2476 #if WANT_ASSERT
2477 #define ASSERT_MPN_ZERO_P(ptr,size) \
2478 do { \
2479 mp_size_t __i; \
2480 ASSERT ((size) >= 0); \
2481 for (__i = 0; __i < (size); __i++) \
2482 ASSERT ((ptr)[__i] == 0); \
2483 } while (0)
2484 #define ASSERT_MPN_NONZERO_P(ptr,size) \
2485 do { \
2486 mp_size_t __i; \
2487 int __nonzero = 0; \
2488 ASSERT ((size) >= 0); \
2489 for (__i = 0; __i < (size); __i++) \
2490 if ((ptr)[__i] != 0) \
2491 { \
2492 __nonzero = 1; \
2493 break; \
2494 } \
2495 ASSERT (__nonzero); \
2496 } while (0)
2497 #else
2498 #define ASSERT_MPN_ZERO_P(ptr,size) do {} while (0)
2499 #define ASSERT_MPN_NONZERO_P(ptr,size) do {} while (0)
2500 #endif
2503 #if ! HAVE_NATIVE_mpn_com
2504 #undef mpn_com
2505 #define mpn_com(d,s,n) \
2506 do { \
2507 mp_ptr __d = (d); \
2508 mp_srcptr __s = (s); \
2509 mp_size_t __n = (n); \
2510 ASSERT (__n >= 1); \
2511 ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s, __n)); \
2512 do \
2513 *__d++ = (~ *__s++) & GMP_NUMB_MASK; \
2514 while (--__n); \
2515 } while (0)
2516 #endif
2518 #define MPN_LOGOPS_N_INLINE(rp, up, vp, n, operation) \
2519 do { \
2520 mp_srcptr __up = (up); \
2521 mp_srcptr __vp = (vp); \
2522 mp_ptr __rp = (rp); \
2523 mp_size_t __n = (n); \
2524 mp_limb_t __a, __b; \
2525 ASSERT (__n > 0); \
2526 ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __up, __n)); \
2527 ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __vp, __n)); \
2528 __up += __n; \
2529 __vp += __n; \
2530 __rp += __n; \
2531 __n = -__n; \
2532 do { \
2533 __a = __up[__n]; \
2534 __b = __vp[__n]; \
2535 __rp[__n] = operation; \
2536 } while (++__n); \
2537 } while (0)
2540 #if ! HAVE_NATIVE_mpn_and_n
2541 #undef mpn_and_n
2542 #define mpn_and_n(rp, up, vp, n) \
2543 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & __b)
2544 #endif
2546 #if ! HAVE_NATIVE_mpn_andn_n
2547 #undef mpn_andn_n
2548 #define mpn_andn_n(rp, up, vp, n) \
2549 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & ~__b)
2550 #endif
2552 #if ! HAVE_NATIVE_mpn_nand_n
2553 #undef mpn_nand_n
2554 #define mpn_nand_n(rp, up, vp, n) \
2555 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a & __b) & GMP_NUMB_MASK)
2556 #endif
2558 #if ! HAVE_NATIVE_mpn_ior_n
2559 #undef mpn_ior_n
2560 #define mpn_ior_n(rp, up, vp, n) \
2561 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a | __b)
2562 #endif
2564 #if ! HAVE_NATIVE_mpn_iorn_n
2565 #undef mpn_iorn_n
2566 #define mpn_iorn_n(rp, up, vp, n) \
2567 MPN_LOGOPS_N_INLINE (rp, up, vp, n, (__a | ~__b) & GMP_NUMB_MASK)
2568 #endif
2570 #if ! HAVE_NATIVE_mpn_nior_n
2571 #undef mpn_nior_n
2572 #define mpn_nior_n(rp, up, vp, n) \
2573 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a | __b) & GMP_NUMB_MASK)
2574 #endif
2576 #if ! HAVE_NATIVE_mpn_xor_n
2577 #undef mpn_xor_n
2578 #define mpn_xor_n(rp, up, vp, n) \
2579 MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a ^ __b)
2580 #endif
2582 #if ! HAVE_NATIVE_mpn_xnor_n
2583 #undef mpn_xnor_n
2584 #define mpn_xnor_n(rp, up, vp, n) \
2585 MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a ^ __b) & GMP_NUMB_MASK)
2586 #endif
2588 #define mpn_trialdiv __MPN(trialdiv)
2589 __GMP_DECLSPEC mp_limb_t mpn_trialdiv (mp_srcptr, mp_size_t, mp_size_t, int *);
2591 #define mpn_remove __MPN(remove)
2592 __GMP_DECLSPEC mp_bitcnt_t mpn_remove (mp_ptr, mp_size_t *, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_bitcnt_t);
2595 /* ADDC_LIMB sets w=x+y and cout to 0 or 1 for a carry from that addition. */
2596 #if GMP_NAIL_BITS == 0
2597 #define ADDC_LIMB(cout, w, x, y) \
2598 do { \
2599 mp_limb_t __x = (x); \
2600 mp_limb_t __y = (y); \
2601 mp_limb_t __w = __x + __y; \
2602 (w) = __w; \
2603 (cout) = __w < __x; \
2604 } while (0)
2605 #else
2606 #define ADDC_LIMB(cout, w, x, y) \
2607 do { \
2608 mp_limb_t __w; \
2609 ASSERT_LIMB (x); \
2610 ASSERT_LIMB (y); \
2611 __w = (x) + (y); \
2612 (w) = __w & GMP_NUMB_MASK; \
2613 (cout) = __w >> GMP_NUMB_BITS; \
2614 } while (0)
2615 #endif
2617 /* SUBC_LIMB sets w=x-y and cout to 0 or 1 for a borrow from that
2618 subtract. */
2619 #if GMP_NAIL_BITS == 0
2620 #define SUBC_LIMB(cout, w, x, y) \
2621 do { \
2622 mp_limb_t __x = (x); \
2623 mp_limb_t __y = (y); \
2624 mp_limb_t __w = __x - __y; \
2625 (w) = __w; \
2626 (cout) = __w > __x; \
2627 } while (0)
2628 #else
2629 #define SUBC_LIMB(cout, w, x, y) \
2630 do { \
2631 mp_limb_t __w = (x) - (y); \
2632 (w) = __w & GMP_NUMB_MASK; \
2633 (cout) = __w >> (GMP_LIMB_BITS-1); \
2634 } while (0)
2635 #endif
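/* Example (illustrative sketch only, no nails assumed): adding two
   2-limb numbers {a1,a0} and {b1,b0} into {s2,s1,s0} with ADDC_LIMB:

     mp_limb_t s0, s1, s2, c0, c1;
     ADDC_LIMB (c0, s0, a0, b0);     low limbs, c0 is the carry out
     ADDC_LIMB (c1, s1, a1, b1);     high limbs
     s1 += c0;                       propagate the low carry
     c1 += (s1 < c0);
     s2 = c1;

   the variable names here are hypothetical; a similar pattern works with
   SUBC_LIMB for a multi-limb subtract.  */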
2638 /* MPN_INCR_U does {ptr,size} += n, MPN_DECR_U does {ptr,size} -= n, both
2639 expecting no carry (or borrow) from that.
2641 The size parameter is only for the benefit of assertion checking. In a
2642 normal build it's unused and the carry/borrow is just propagated as far
2643 as it needs to go.
2645 On random data, usually only one or two limbs of {ptr,size} get updated,
2646 so there's no need for any sophisticated looping, just something compact
2647 and sensible.
2649 FIXME: Switch all code from mpn_{incr,decr}_u to MPN_{INCR,DECR}_U,
2650 declaring their operand sizes, then remove the former. This is purely
2651 for the benefit of assertion checking. */
2653 #if defined (__GNUC__) && GMP_NAIL_BITS == 0 && ! defined (NO_ASM) \
2654 && (defined(HAVE_HOST_CPU_FAMILY_x86) || defined(HAVE_HOST_CPU_FAMILY_x86_64)) \
2655 && ! WANT_ASSERT
2656 /* Better flags handling than the generic C gives on i386, saving a few
2657 bytes of code and maybe a cycle or two. */
2659 #define MPN_IORD_U(ptr, incr, aors) \
2660 do { \
2661 mp_ptr __ptr_dummy; \
2662 if (__builtin_constant_p (incr) && (incr) == 0) \
2663 { \
2664 } \
2665 else if (__builtin_constant_p (incr) && (incr) == 1) \
2666 { \
2667 __asm__ __volatile__ \
2668 ("\n" ASM_L(top) ":\n" \
2669 "\t" aors "\t$1, (%0)\n" \
2670 "\tlea\t%c2(%0), %0\n" \
2671 "\tjc\t" ASM_L(top) \
2672 : "=r" (__ptr_dummy) \
2673 : "0" (ptr), "n" (sizeof(mp_limb_t)) \
2674 : "memory"); \
2675 } \
2676 else \
2677 { \
2678 __asm__ __volatile__ \
2679 ( aors "\t%2, (%0)\n" \
2680 "\tjnc\t" ASM_L(done) "\n" \
2681 ASM_L(top) ":\n" \
2682 "\t" aors "\t$1, %c3(%0)\n" \
2683 "\tlea\t%c3(%0), %0\n" \
2684 "\tjc\t" ASM_L(top) "\n" \
2685 ASM_L(done) ":\n" \
2686 : "=r" (__ptr_dummy) \
2687 : "0" (ptr), \
2688 "ri" ((mp_limb_t) (incr)), "n" (sizeof(mp_limb_t)) \
2689 : "memory"); \
2690 } \
2691 } while (0)
2693 #if GMP_LIMB_BITS == 32
2694 #define MPN_INCR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "addl")
2695 #define MPN_DECR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "subl")
2696 #endif
2697 #if GMP_LIMB_BITS == 64
2698 #define MPN_INCR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "addq")
2699 #define MPN_DECR_U(ptr, size, incr) MPN_IORD_U (ptr, incr, "subq")
2700 #endif
2701 #define mpn_incr_u(ptr, incr) MPN_INCR_U (ptr, 0, incr)
2702 #define mpn_decr_u(ptr, incr) MPN_DECR_U (ptr, 0, incr)
2703 #endif
2705 #if GMP_NAIL_BITS == 0
2706 #ifndef mpn_incr_u
2707 #define mpn_incr_u(p,incr) \
2708 do { \
2709 mp_limb_t __x; \
2710 mp_ptr __p = (p); \
2711 if (__builtin_constant_p (incr) && (incr) == 1) \
2712 { \
2713 while (++(*(__p++)) == 0) \
2714 ; \
2715 } \
2716 else \
2717 { \
2718 __x = *__p + (incr); \
2719 *__p = __x; \
2720 if (__x < (incr)) \
2721 while (++(*(++__p)) == 0) \
2722 ; \
2723 } \
2724 } while (0)
2725 #endif
2726 #ifndef mpn_decr_u
2727 #define mpn_decr_u(p,incr) \
2728 do { \
2729 mp_limb_t __x; \
2730 mp_ptr __p = (p); \
2731 if (__builtin_constant_p (incr) && (incr) == 1) \
2732 { \
2733 while ((*(__p++))-- == 0) \
2734 ; \
2735 } \
2736 else \
2737 { \
2738 __x = *__p; \
2739 *__p = __x - (incr); \
2740 if (__x < (incr)) \
2741 while ((*(++__p))-- == 0) \
2742 ; \
2743 } \
2744 } while (0)
2745 #endif
2746 #endif
2748 #if GMP_NAIL_BITS >= 1
2749 #ifndef mpn_incr_u
2750 #define mpn_incr_u(p,incr) \
2751 do { \
2752 mp_limb_t __x; \
2753 mp_ptr __p = (p); \
2754 if (__builtin_constant_p (incr) && (incr) == 1) \
2755 { \
2756 do \
2757 { \
2758 __x = (*__p + 1) & GMP_NUMB_MASK; \
2759 *__p++ = __x; \
2760 } \
2761 while (__x == 0); \
2762 } \
2763 else \
2764 { \
2765 __x = (*__p + (incr)); \
2766 *__p++ = __x & GMP_NUMB_MASK; \
2767 if (__x >> GMP_NUMB_BITS != 0) \
2768 { \
2769 do \
2770 { \
2771 __x = (*__p + 1) & GMP_NUMB_MASK; \
2772 *__p++ = __x; \
2773 } \
2774 while (__x == 0); \
2775 } \
2776 } \
2777 } while (0)
2778 #endif
2779 #ifndef mpn_decr_u
2780 #define mpn_decr_u(p,incr) \
2781 do { \
2782 mp_limb_t __x; \
2783 mp_ptr __p = (p); \
2784 if (__builtin_constant_p (incr) && (incr) == 1) \
2785 { \
2786 do \
2787 { \
2788 __x = *__p; \
2789 *__p++ = (__x - 1) & GMP_NUMB_MASK; \
2790 } \
2791 while (__x == 0); \
2792 } \
2793 else \
2794 { \
2795 __x = *__p - (incr); \
2796 *__p++ = __x & GMP_NUMB_MASK; \
2797 if (__x >> GMP_NUMB_BITS != 0) \
2798 { \
2799 do \
2800 { \
2801 __x = *__p; \
2802 *__p++ = (__x - 1) & GMP_NUMB_MASK; \
2803 } \
2804 while (__x == 0); \
2805 } \
2806 } \
2807 } while (0)
2808 #endif
2809 #endif
2811 #ifndef MPN_INCR_U
2812 #if WANT_ASSERT
2813 #define MPN_INCR_U(ptr, size, n) \
2814 do { \
2815 ASSERT ((size) >= 1); \
2816 ASSERT_NOCARRY (mpn_add_1 (ptr, ptr, size, n)); \
2817 } while (0)
2818 #else
2819 #define MPN_INCR_U(ptr, size, n) mpn_incr_u (ptr, n)
2820 #endif
2821 #endif
2823 #ifndef MPN_DECR_U
2824 #if WANT_ASSERT
2825 #define MPN_DECR_U(ptr, size, n) \
2826 do { \
2827 ASSERT ((size) >= 1); \
2828 ASSERT_NOCARRY (mpn_sub_1 (ptr, ptr, size, n)); \
2829 } while (0)
2830 #else
2831 #define MPN_DECR_U(ptr, size, n) mpn_decr_u (ptr, n)
2832 #endif
2833 #endif
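/* Example (illustrative only): a typical use is propagating a carry that
   is known not to overflow, e.g. after adding a low block

     mp_limb_t cy = mpn_add_n (rp, ap, bp, lo);
     if (cy != 0)
       MPN_INCR_U (rp + lo, hi, CNST_LIMB (1));

   where {rp+lo, hi} is known to have room; lo and hi are hypothetical
   sizes here.  */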
2836 /* Structure for conversion between internal binary format and strings. */
2837 struct bases
2838 {
2839 /* Number of digits in the conversion base that always fits in an mp_limb_t.
2840 For example, for base 10 on a machine where an mp_limb_t has 32 bits this
2841 is 9, since 10**9 is the largest number that fits into an mp_limb_t. */
2842 int chars_per_limb;
2844 /* log(2)/log(conversion_base) */
2845 mp_limb_t logb2;
2847 /* log(conversion_base)/log(2) */
2848 mp_limb_t log2b;
2850 /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
2851 factors of base. Exception: For 2, 4, 8, etc, big_base is log2(base),
2852 i.e. the number of bits used to represent each digit in the base. */
2853 mp_limb_t big_base;
2855 /* A GMP_LIMB_BITS bit approximation to 1/big_base, represented as a
2856 fixed-point number. Instead of dividing by big_base an application can
2857 choose to multiply by big_base_inverted. */
2858 mp_limb_t big_base_inverted;
2859 };
2861 #define mp_bases __MPN(bases)
2862 __GMP_DECLSPEC extern const struct bases mp_bases[257];
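/* For instance (illustrative only, 32-bit limbs): mp_bases[10] has
   chars_per_limb 9 and big_base 10^9, so decimal conversion can work on
   chunks of 9 digits per limb-sized step.  */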
2865 /* Compute the number of digits in base for nbits bits, making sure the result
2866 is never too small. The two variants of the macro implement the same
2867 function; the GT2 variant below works just for bases > 2. */
2868 #define DIGITS_IN_BASE_FROM_BITS(res, nbits, b) \
2869 do { \
2870 mp_limb_t _ph, _dummy; \
2871 size_t _nbits = (nbits); \
2872 umul_ppmm (_ph, _dummy, mp_bases[b].logb2, _nbits); \
2873 _ph += (_dummy + _nbits < _dummy); \
2874 res = _ph + 1; \
2875 } while (0)
2876 #define DIGITS_IN_BASEGT2_FROM_BITS(res, nbits, b) \
2877 do { \
2878 mp_limb_t _ph, _dummy; \
2879 size_t _nbits = (nbits); \
2880 umul_ppmm (_ph, _dummy, mp_bases[b].logb2 + 1, _nbits); \
2881 res = _ph + 1; \
2882 } while (0)
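/* Worked example (illustrative only, 64-bit limbs, no nails): for
   nbits = 64 and b = 10, logb2 is roughly log10(2) scaled to a limb, so
   the umul_ppmm high limb is floor (64 * 0.30103) = 19 and res becomes
   20, enough for the 20 decimal digits of 2^64-1 and never too small.  */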
2884 /* For power of 2 bases this is exact. For other bases the result is either
2885 exact or one too big.
2887 To be exact always it'd be necessary to examine all the limbs of the
2888 operand, since numbers like 100..000 and 99...999 generally differ only
2889 in the lowest limb. It'd be possible to examine just a couple of high
2890 limbs to increase the probability of being exact, but that doesn't seem
2891 worth bothering with. */
2893 #define MPN_SIZEINBASE(result, ptr, size, base) \
2894 do { \
2895 int __lb_base, __cnt; \
2896 size_t __totbits; \
2898 ASSERT ((size) >= 0); \
2899 ASSERT ((base) >= 2); \
2900 ASSERT ((base) < numberof (mp_bases)); \
2902 /* Special case for X == 0. */ \
2903 if ((size) == 0) \
2904 (result) = 1; \
2905 else \
2906 { \
2907 /* Calculate the total number of significant bits of X. */ \
2908 count_leading_zeros (__cnt, (ptr)[(size)-1]); \
2909 __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
2911 if (POW2_P (base)) \
2912 { \
2913 __lb_base = mp_bases[base].big_base; \
2914 (result) = (__totbits + __lb_base - 1) / __lb_base; \
2915 } \
2916 else \
2917 { \
2918 DIGITS_IN_BASEGT2_FROM_BITS (result, __totbits, base); \
2919 } \
2920 } \
2921 } while (0)
2923 #define MPN_SIZEINBASE_2EXP(result, ptr, size, base2exp) \
2924 do { \
2925 int __cnt; \
2926 mp_bitcnt_t __totbits; \
2927 ASSERT ((size) > 0); \
2928 ASSERT ((ptr)[(size)-1] != 0); \
2929 count_leading_zeros (__cnt, (ptr)[(size)-1]); \
2930 __totbits = (mp_bitcnt_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS); \
2931 (result) = (__totbits + (base2exp)-1) / (base2exp); \
2932 } while (0)
2935 /* bit count to limb count, rounding up */
2936 #define BITS_TO_LIMBS(n) (((n) + (GMP_NUMB_BITS - 1)) / GMP_NUMB_BITS)
2938 /* MPN_SET_UI sets an mpn (ptr, cnt) to given ui. MPZ_FAKE_UI creates fake
2939 mpz_t from ui. The zp argument must have room for LIMBS_PER_ULONG limbs
2940 in both cases (LIMBS_PER_ULONG is also defined here.) */
2941 #if BITS_PER_ULONG <= GMP_NUMB_BITS /* need one limb per ulong */
2943 #define LIMBS_PER_ULONG 1
2944 #define MPN_SET_UI(zp, zn, u) \
2945 (zp)[0] = (u); \
2946 (zn) = ((zp)[0] != 0);
2947 #define MPZ_FAKE_UI(z, zp, u) \
2948 (zp)[0] = (u); \
2949 PTR (z) = (zp); \
2950 SIZ (z) = ((zp)[0] != 0); \
2951 ASSERT_CODE (ALLOC (z) = 1);
2953 #else /* need two limbs per ulong */
2955 #define LIMBS_PER_ULONG 2
2956 #define MPN_SET_UI(zp, zn, u) \
2957 (zp)[0] = (u) & GMP_NUMB_MASK; \
2958 (zp)[1] = (u) >> GMP_NUMB_BITS; \
2959 (zn) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0);
2960 #define MPZ_FAKE_UI(z, zp, u) \
2961 (zp)[0] = (u) & GMP_NUMB_MASK; \
2962 (zp)[1] = (u) >> GMP_NUMB_BITS; \
2963 SIZ (z) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0); \
2964 PTR (z) = (zp); \
2965 ASSERT_CODE (ALLOC (z) = 2);
2967 #endif
2970 #if HAVE_HOST_CPU_FAMILY_x86
2971 #define TARGET_REGISTER_STARVED 1
2972 #else
2973 #define TARGET_REGISTER_STARVED 0
2974 #endif
2977 /* LIMB_HIGHBIT_TO_MASK(n) examines the high bit of a limb value and turns 1
2978 or 0 there into a limb 0xFF..FF or 0 respectively.
2980 On most CPUs this is just an arithmetic right shift by GMP_LIMB_BITS-1,
2981 but C99 doesn't guarantee signed right shifts are arithmetic, so we have
2982 a little compile-time test and a fallback to a "? :" form. The latter is
2983 necessary for instance on Cray vector systems.
2985 Recent versions of gcc (eg. 3.3) will in fact optimize a "? :" like this
2986 to an arithmetic right shift anyway, but it's good to get the desired
2987 shift on past versions too (in particular since an important use of
2988 LIMB_HIGHBIT_TO_MASK is in udiv_qrnnd_preinv). */
2990 #define LIMB_HIGHBIT_TO_MASK(n) \
2991 (((mp_limb_signed_t) -1 >> 1) < 0 \
2992 ? (mp_limb_signed_t) (n) >> (GMP_LIMB_BITS - 1) \
2993 : (n) & GMP_LIMB_HIGHBIT ? MP_LIMB_T_MAX : CNST_LIMB(0))
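/* Example (illustrative only): the mask form is handy for branch-free
   adjustments, e.g. adding back d when a prior subtraction underflowed
   and left the high bit of r set:

     mask = LIMB_HIGHBIT_TO_MASK (r);
     r += mask & d;
*/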
2996 /* Use a library function for invert_limb, if available. */
2997 #define mpn_invert_limb __MPN(invert_limb)
2998 __GMP_DECLSPEC mp_limb_t mpn_invert_limb (mp_limb_t) ATTRIBUTE_CONST;
2999 #if ! defined (invert_limb) && HAVE_NATIVE_mpn_invert_limb
3000 #define invert_limb(invxl,xl) \
3001 do { \
3002 (invxl) = mpn_invert_limb (xl); \
3003 } while (0)
3004 #endif
3006 #ifndef invert_limb
3007 #define invert_limb(invxl,xl) \
3008 do { \
3009 mp_limb_t _dummy; \
3010 ASSERT ((xl) != 0); \
3011 udiv_qrnnd (invxl, _dummy, ~(xl), ~CNST_LIMB(0), xl); \
3012 } while (0)
3013 #endif
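/* Sketch of what the generic invert_limb above computes (no nails
   assumed): with B = 2^GMP_NUMB_BITS and a normalized divisor d (high
   bit set), dividing the two-limb value (B-1-d)*B + (B-1) by d gives

     invxl = floor ((B^2 - 1) / d) - B

   which is the reciprocal form expected by udiv_qrnnd_preinv below.  */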
3015 #define invert_pi1(dinv, d1, d0) \
3016 do { \
3017 mp_limb_t _v, _p, _t1, _t0, _mask; \
3018 invert_limb (_v, d1); \
3019 _p = (d1) * _v; \
3020 _p += (d0); \
3021 if (_p < (d0)) \
3022 { \
3023 _v--; \
3024 _mask = -(mp_limb_t) (_p >= (d1)); \
3025 _p -= (d1); \
3026 _v += _mask; \
3027 _p -= _mask & (d1); \
3028 } \
3029 umul_ppmm (_t1, _t0, d0, _v); \
3030 _p += _t1; \
3031 if (_p < _t1) \
3032 { \
3033 _v--; \
3034 if (UNLIKELY (_p >= (d1))) \
3035 { \
3036 if (_p > (d1) || _t0 >= (d0)) \
3037 _v--; \
3038 } \
3039 } \
3040 (dinv).inv32 = _v; \
3041 } while (0)
3044 /* udiv_qrnnd_preinv -- Based on work by Niels Möller and Torbjörn Granlund.
3045 We write things strangely below, to help gcc. A more straightforward
3046 version:
3047 _r = (nl) - _qh * (d);
3048 _t = _r + (d);
3049 if (_r >= _ql)
3050 {
3051 _qh--;
3052 _r = _t;
3053 }
3054 For one operation shorter critical path, one may want to use this form:
3055 _p = _qh * (d)
3056 _s = (nl) + (d);
3057 _r = (nl) - _p;
3058 _t = _s - _p;
3059 if (_r >= _ql)
3060 {
3061 _qh--;
3062 _r = _t;
3063 }
3064 */
3065 #define udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
3066 do { \
3067 mp_limb_t _qh, _ql, _r, _mask; \
3068 umul_ppmm (_qh, _ql, (nh), (di)); \
3069 if (__builtin_constant_p (nl) && (nl) == 0) \
3070 { \
3071 _qh += (nh) + 1; \
3072 _r = - _qh * (d); \
3073 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
3074 _qh += _mask; \
3075 _r += _mask & (d); \
3076 } \
3077 else \
3078 { \
3079 add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
3080 _r = (nl) - _qh * (d); \
3081 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
3082 _qh += _mask; \
3083 _r += _mask & (d); \
3084 if (UNLIKELY (_r >= (d))) \
3085 { \
3086 _r -= (d); \
3087 _qh++; \
3088 } \
3089 } \
3090 (r) = _r; \
3091 (q) = _qh; \
3092 } while (0)
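/* Example (illustrative sketch only): the classical use is word-by-word
   division of {np,n} by a normalized divisor d (high bit set), with di
   from invert_limb, most significant limb first:

     mp_limb_t r = 0;
     mp_size_t i;
     for (i = n - 1; i >= 0; i--)
       udiv_qrnnd_preinv (qp[i], r, r, np[i], d, di);

   leaving the quotient in {qp,n} and the remainder in r; np, qp, n, d
   and di are hypothetical names here.  */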
3094 /* Dividing (NH, NL) by D, returning the remainder only. Unlike
3095 udiv_qrnnd_preinv, works also for the case NH == D, where the
3096 quotient doesn't quite fit in a single limb. */
3097 #define udiv_rnnd_preinv(r, nh, nl, d, di) \
3098 do { \
3099 mp_limb_t _qh, _ql, _r, _mask; \
3100 umul_ppmm (_qh, _ql, (nh), (di)); \
3101 if (__builtin_constant_p (nl) && (nl) == 0) \
3102 { \
3103 _r = ~(_qh + (nh)) * (d); \
3104 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
3105 _r += _mask & (d); \
3106 } \
3107 else \
3108 { \
3109 add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
3110 _r = (nl) - _qh * (d); \
3111 _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
3112 _r += _mask & (d); \
3113 if (UNLIKELY (_r >= (d))) \
3114 _r -= (d); \
3115 } \
3116 (r) = _r; \
3117 } while (0)
3119 /* Compute the quotient and remainder for n / d. Requires d
3120 >= B^2 / 2 and n < d B. di is the inverse
3122 floor ((B^3 - 1) / (d0 + d1 B)) - B.
3124 NOTE: Output variables are updated multiple times. Only some inputs
3125 and outputs may overlap.
3126 */
3127 #define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
3128 do { \
3129 mp_limb_t _q0, _t1, _t0, _mask; \
3130 umul_ppmm ((q), _q0, (n2), (dinv)); \
3131 add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
3133 /* Compute the two most significant limbs of n - q'd */ \
3134 (r1) = (n1) - (d1) * (q); \
3135 sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
3136 umul_ppmm (_t1, _t0, (d0), (q)); \
3137 sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
3138 (q)++; \
3140 /* Conditionally adjust q and the remainders */ \
3141 _mask = - (mp_limb_t) ((r1) >= _q0); \
3142 (q) += _mask; \
3143 add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
3144 if (UNLIKELY ((r1) >= (d1))) \
3145 { \
3146 if ((r1) > (d1) || (r0) >= (d0)) \
3147 { \
3148 (q)++; \
3149 sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
3150 } \
3151 } \
3152 } while (0)
3154 #ifndef mpn_preinv_divrem_1 /* if not done with cpuvec in a fat binary */
3155 #define mpn_preinv_divrem_1 __MPN(preinv_divrem_1)
3156 __GMP_DECLSPEC mp_limb_t mpn_preinv_divrem_1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int);
3157 #endif
3160 /* USE_PREINV_DIVREM_1 is whether to use mpn_preinv_divrem_1, as opposed to the
3161 plain mpn_divrem_1. The default is yes, since the few CISC chips where
3162 preinv is not good have defines saying so. */
3163 #ifndef USE_PREINV_DIVREM_1
3164 #define USE_PREINV_DIVREM_1 1
3165 #endif
3167 #if USE_PREINV_DIVREM_1
3168 #define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift) \
3169 mpn_preinv_divrem_1 (qp, xsize, ap, size, d, dinv, shift)
3170 #else
3171 #define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift) \
3172 mpn_divrem_1 (qp, xsize, ap, size, d)
3173 #endif
3175 #ifndef PREINV_MOD_1_TO_MOD_1_THRESHOLD
3176 #define PREINV_MOD_1_TO_MOD_1_THRESHOLD 10
3177 #endif
3179 /* This selection may seem backwards. The reason mpn_mod_1 typically takes
3180 over for larger sizes is that it uses the mod_1_1 function. */
3181 #define MPN_MOD_OR_PREINV_MOD_1(src,size,divisor,inverse) \
3182 (BELOW_THRESHOLD (size, PREINV_MOD_1_TO_MOD_1_THRESHOLD) \
3183 ? mpn_preinv_mod_1 (src, size, divisor, inverse) \
3184 : mpn_mod_1 (src, size, divisor))
3187 #ifndef mpn_mod_34lsub1 /* if not done with cpuvec in a fat binary */
3188 #define mpn_mod_34lsub1 __MPN(mod_34lsub1)
3189 __GMP_DECLSPEC mp_limb_t mpn_mod_34lsub1 (mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE;
3190 #endif
3193 /* DIVEXACT_1_THRESHOLD is the size at which to use mpn_divexact_1, as opposed to
3194 plain mpn_divrem_1. Likewise BMOD_1_TO_MOD_1_THRESHOLD for
3195 mpn_modexact_1_odd against plain mpn_mod_1. On most CPUs divexact and
3196 modexact are faster at all sizes, so the defaults are 0. Those CPUs
3197 where this is not right have a tuned threshold. */
3198 #ifndef DIVEXACT_1_THRESHOLD
3199 #define DIVEXACT_1_THRESHOLD 0
3200 #endif
3201 #ifndef BMOD_1_TO_MOD_1_THRESHOLD
3202 #define BMOD_1_TO_MOD_1_THRESHOLD 10
3203 #endif
3205 #define MPN_DIVREM_OR_DIVEXACT_1(rp, up, n, d) \
3206 do { \
3207 if (BELOW_THRESHOLD (n, DIVEXACT_1_THRESHOLD)) \
3208 ASSERT_NOCARRY (mpn_divrem_1 (rp, (mp_size_t) 0, up, n, d)); \
3209 else \
3211 ASSERT (mpn_mod_1 (up, n, d) == 0); \
3212 mpn_divexact_1 (rp, up, n, d); \
3214 } while (0)
3216 #ifndef mpn_modexact_1c_odd /* if not done with cpuvec in a fat binary */
3217 #define mpn_modexact_1c_odd __MPN(modexact_1c_odd)
3218 __GMP_DECLSPEC mp_limb_t mpn_modexact_1c_odd (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
3219 #endif
3221 #if HAVE_NATIVE_mpn_modexact_1_odd
3222 #define mpn_modexact_1_odd __MPN(modexact_1_odd)
3223 __GMP_DECLSPEC mp_limb_t mpn_modexact_1_odd (mp_srcptr, mp_size_t, mp_limb_t) __GMP_ATTRIBUTE_PURE;
3224 #else
3225 #define mpn_modexact_1_odd(src,size,divisor) \
3226 mpn_modexact_1c_odd (src, size, divisor, CNST_LIMB(0))
3227 #endif
3229 #define MPN_MOD_OR_MODEXACT_1_ODD(src,size,divisor) \
3230 (BELOW_THRESHOLD (size, BMOD_1_TO_MOD_1_THRESHOLD) \
3231 ? mpn_modexact_1_odd (src, size, divisor) \
3232 : mpn_mod_1 (src, size, divisor))
3234 /* binvert_limb() sets inv to the multiplicative inverse of n modulo
3235 2^GMP_NUMB_BITS, ie. satisfying inv*n == 1 mod 2^GMP_NUMB_BITS.
3236 n must be odd (otherwise such an inverse doesn't exist).
3238 This is not to be confused with invert_limb(), which is completely
3239 different.
3241 The table lookup gives an inverse with the low 8 bits valid, and each
3242 multiply step doubles the number of bits. See Jebelean "An algorithm for
3243 exact division" end of section 4 (reference in gmp.texi).
3245 Possible enhancement: Could use UHWtype until the last step, if half-size
3246 multiplies are faster (might help under _LONG_LONG_LIMB).
3248 Alternative: As noted in Granlund and Montgomery "Division by Invariant
3249 Integers using Multiplication" (reference in gmp.texi), n itself gives a
3250 3-bit inverse immediately, and could be used instead of a table lookup.
3251 A 4-bit inverse can be obtained effectively from xoring bits 1 and 2 into
3252 bit 3, for instance with (((n + 2) & 4) << 1) ^ n. */
3254 #define binvert_limb_table __gmp_binvert_limb_table
3255 __GMP_DECLSPEC extern const unsigned char binvert_limb_table[128];
3257 #define binvert_limb(inv,n) \
3258 do { \
3259 mp_limb_t __n = (n); \
3260 mp_limb_t __inv; \
3261 ASSERT ((__n & 1) == 1); \
3263 __inv = binvert_limb_table[(__n/2) & 0x7F]; /* 8 */ \
3264 if (GMP_NUMB_BITS > 8) __inv = 2 * __inv - __inv * __inv * __n; \
3265 if (GMP_NUMB_BITS > 16) __inv = 2 * __inv - __inv * __inv * __n; \
3266 if (GMP_NUMB_BITS > 32) __inv = 2 * __inv - __inv * __inv * __n; \
3268 if (GMP_NUMB_BITS > 64) \
3270 int __invbits = 64; \
3271 do { \
3272 __inv = 2 * __inv - __inv * __inv * __n; \
3273 __invbits *= 2; \
3274 } while (__invbits < GMP_NUMB_BITS); \
3277 ASSERT ((__inv * __n & GMP_NUMB_MASK) == 1); \
3278 (inv) = __inv & GMP_NUMB_MASK; \
3279 } while (0)
3280 #define modlimb_invert binvert_limb /* backward compatibility */
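/* Editorial note with a small check (not compiled): the Newton step
   inv' = 2*inv - inv*inv*n doubles the number of correct low bits, since
   if inv*n == 1 + e with e == 0 mod 2^k, then inv' = inv*(1 - e) and
   inv'*n = (1+e)(1-e) = 1 - e^2 == 1 mod 2^(2k).  A hypothetical
   self-test of binvert_limb:  */
#if 0
static int
example_check_binvert (mp_limb_t n)
{
  mp_limb_t inv;
  ASSERT (n & 1);
  binvert_limb (inv, n);
  return ((inv * n) & GMP_NUMB_MASK) == 1;  /* 1 when the inverse is right */
}
#endif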
3282 /* Multiplicative inverse of 3, modulo 2^GMP_NUMB_BITS.
3283 Eg. 0xAAAAAAAB for 32 bits, 0xAAAAAAAAAAAAAAAB for 64 bits.
3284 GMP_NUMB_MAX/3*2+1 is right when GMP_NUMB_BITS is even, but when it's odd
3285 we need to start from GMP_NUMB_MAX>>1. */
3286 #define MODLIMB_INVERSE_3 (((GMP_NUMB_MAX >> (GMP_NUMB_BITS % 2)) / 3) * 2 + 1)
3288 /* ceil(GMP_NUMB_MAX/3) and ceil(2*GMP_NUMB_MAX/3).
3289 These expressions work because GMP_NUMB_MAX%3 != 0 for all GMP_NUMB_BITS. */
3290 #define GMP_NUMB_CEIL_MAX_DIV3 (GMP_NUMB_MAX / 3 + 1)
3291 #define GMP_NUMB_CEIL_2MAX_DIV3 ((GMP_NUMB_MAX>>1) / 3 + 1 + GMP_NUMB_HIGHBIT)
3294 /* Set r to -a mod d. a>=d is allowed. Can give r>d. All should be limbs.
3296 It's not clear whether this is the best way to do this calculation.
3297 Anything congruent to -a would be fine for the one limb congruence
3298 tests. */
3300 #define NEG_MOD(r, a, d) \
3301 do { \
3302 ASSERT ((d) != 0); \
3303 ASSERT_LIMB (a); \
3304 ASSERT_LIMB (d); \
3306 if ((a) <= (d)) \
3308 /* small a is reasonably likely */ \
3309 (r) = (d) - (a); \
3311 else \
3313 unsigned __twos; \
3314 mp_limb_t __dnorm; \
3315 count_leading_zeros (__twos, d); \
3316 __twos -= GMP_NAIL_BITS; \
3317 __dnorm = (d) << __twos; \
3318 (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a); \
3321 ASSERT_LIMB (r); \
3322 } while (0)
3324 /* A bit mask of all the least significant zero bits of n, or -1 if n==0. */
3325 #define LOW_ZEROS_MASK(n) (((n) & -(n)) - 1)
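/* Example (editorial): for n = 0x28 (binary 101000), n & -n = 0x8 and
   LOW_ZEROS_MASK(n) = 0x7, a mask of the three low zero bits.  */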
3328 /* ULONG_PARITY sets "p" to 1 if there's an odd number of 1 bits in "n", or
3329 to 0 if there's an even number. "n" should be an unsigned long and "p"
3330 an int. */
3332 #if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
3333 #define ULONG_PARITY(p, n) \
3334 do { \
3335 int __p; \
3336 __asm__ ("ctpop %1, %0" : "=r" (__p) : "r" (n)); \
3337 (p) = __p & 1; \
3338 } while (0)
3339 #endif
3341 /* Cray intrinsic _popcnt. */
3342 #ifdef _CRAY
3343 #define ULONG_PARITY(p, n) \
3344 do { \
3345 (p) = _popcnt (n) & 1; \
3346 } while (0)
3347 #endif
3349 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3350 && ! defined (NO_ASM) && defined (__ia64)
3351 /* unsigned long is either 32 or 64 bits depending on the ABI, zero extend
3352 to a 64 bit unsigned long long for popcnt */
3353 #define ULONG_PARITY(p, n) \
3354 do { \
3355 unsigned long long __n = (unsigned long) (n); \
3356 int __p; \
3357 __asm__ ("popcnt %0 = %1" : "=r" (__p) : "r" (__n)); \
3358 (p) = __p & 1; \
3359 } while (0)
3360 #endif
3362 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3363 && ! defined (NO_ASM) && HAVE_HOST_CPU_FAMILY_x86
3364 #if __GMP_GNUC_PREREQ (3,1)
3365 #define __GMP_qm "=Qm"
3366 #define __GMP_q "=Q"
3367 #else
3368 #define __GMP_qm "=qm"
3369 #define __GMP_q "=q"
3370 #endif
3371 #define ULONG_PARITY(p, n) \
3372 do { \
3373 char __p; \
3374 unsigned long __n = (n); \
3375 __n ^= (__n >> 16); \
3376 __asm__ ("xorb %h1, %b1\n\t" \
3377 "setpo %0" \
3378 : __GMP_qm (__p), __GMP_q (__n) \
3379 : "1" (__n)); \
3380 (p) = __p; \
3381 } while (0)
3382 #endif
3384 #if ! defined (ULONG_PARITY)
3385 #define ULONG_PARITY(p, n) \
3386 do { \
3387 unsigned long __n = (n); \
3388 int __p = 0; \
3389 do \
3391 __p ^= 0x96696996L >> (__n & 0x1F); \
3392 __n >>= 5; \
3394 while (__n != 0); \
3396 (p) = __p & 1; \
3397 } while (0)
3398 #endif
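/* Editorial note (not compiled): the generic version above folds five
   bits of __n per iteration, using 0x96696996 as a 32-entry parity table
   held in a word.  A hypothetical bit-at-a-time reference for
   cross-checking:  */
#if 0
static int
example_parity_ref (unsigned long n)
{
  int p = 0;
  for (; n != 0; n >>= 1)
    p ^= (int) (n & 1);
  return p;
}
#endif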
3401 /* 3 cycles on 604 or 750 since shifts and rlwimi's can pair. gcc (as of
3402 version 3.1 at least) doesn't seem to know how to generate rlwimi for
3403 anything other than bit-fields, so use "asm". */
3404 #if defined (__GNUC__) && ! defined (NO_ASM) \
3405 && HAVE_HOST_CPU_FAMILY_powerpc && GMP_LIMB_BITS == 32
3406 #define BSWAP_LIMB(dst, src) \
3407 do { \
3408 mp_limb_t __bswapl_src = (src); \
3409 mp_limb_t __tmp1 = __bswapl_src >> 24; /* low byte */ \
3410 mp_limb_t __tmp2 = __bswapl_src << 24; /* high byte */ \
3411 __asm__ ("rlwimi %0, %2, 24, 16, 23" /* 2nd low */ \
3412 : "=r" (__tmp1) : "0" (__tmp1), "r" (__bswapl_src)); \
3413     __asm__ ("rlwimi %0, %2, 8, 8, 15"      /* 3rd high */          \
3414 : "=r" (__tmp2) : "0" (__tmp2), "r" (__bswapl_src)); \
3415 (dst) = __tmp1 | __tmp2; /* whole */ \
3416 } while (0)
3417 #endif
3419 /* bswap is available on i486 and up and is fast. A combination rorw $8 /
3420 roll $16 / rorw $8 is used in glibc for plain i386 (and in the linux
3421 kernel with xchgb instead of rorw), but this is not done here, because
3422 i386 means generic x86 and mixing word and dword operations will cause
3423 partial register stalls on P6 chips. */
3424 #if defined (__GNUC__) && ! defined (NO_ASM) \
3425 && HAVE_HOST_CPU_FAMILY_x86 && ! HAVE_HOST_CPU_i386 \
3426 && GMP_LIMB_BITS == 32
3427 #define BSWAP_LIMB(dst, src) \
3428 do { \
3429 __asm__ ("bswap %0" : "=r" (dst) : "0" (src)); \
3430 } while (0)
3431 #endif
3433 #if defined (__GNUC__) && ! defined (NO_ASM) \
3434 && defined (__amd64__) && GMP_LIMB_BITS == 64
3435 #define BSWAP_LIMB(dst, src) \
3436 do { \
3437 __asm__ ("bswap %q0" : "=r" (dst) : "0" (src)); \
3438 } while (0)
3439 #endif
3441 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3442 && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3443 #define BSWAP_LIMB(dst, src) \
3444 do { \
3445 __asm__ ("mux1 %0 = %1, @rev" : "=r" (dst) : "r" (src)); \
3446 } while (0)
3447 #endif
3449 /* As per glibc. */
3450 #if defined (__GNUC__) && ! defined (NO_ASM) \
3451 && HAVE_HOST_CPU_FAMILY_m68k && GMP_LIMB_BITS == 32
3452 #define BSWAP_LIMB(dst, src) \
3453 do { \
3454 mp_limb_t __bswapl_src = (src); \
3455 __asm__ ("ror%.w %#8, %0\n\t" \
3456 "swap %0\n\t" \
3457 "ror%.w %#8, %0" \
3458 : "=d" (dst) \
3459 : "0" (__bswapl_src)); \
3460 } while (0)
3461 #endif
3463 #if ! defined (BSWAP_LIMB)
3464 #if GMP_LIMB_BITS == 8
3465 #define BSWAP_LIMB(dst, src) \
3466 do { (dst) = (src); } while (0)
3467 #endif
3468 #if GMP_LIMB_BITS == 16
3469 #define BSWAP_LIMB(dst, src) \
3470 do { \
3471 (dst) = ((src) << 8) + ((src) >> 8); \
3472 } while (0)
3473 #endif
3474 #if GMP_LIMB_BITS == 32
3475 #define BSWAP_LIMB(dst, src) \
3476 do { \
3477 (dst) = \
3478 ((src) << 24) \
3479 + (((src) & 0xFF00) << 8) \
3480 + (((src) >> 8) & 0xFF00) \
3481 + ((src) >> 24); \
3482 } while (0)
3483 #endif
3484 #if GMP_LIMB_BITS == 64
3485 #define BSWAP_LIMB(dst, src) \
3486 do { \
3487 (dst) = \
3488 ((src) << 56) \
3489 + (((src) & 0xFF00) << 40) \
3490 + (((src) & 0xFF0000) << 24) \
3491 + (((src) & 0xFF000000) << 8) \
3492 + (((src) >> 8) & 0xFF000000) \
3493 + (((src) >> 24) & 0xFF0000) \
3494 + (((src) >> 40) & 0xFF00) \
3495 + ((src) >> 56); \
3496 } while (0)
3497 #endif
3498 #endif
3500 #if ! defined (BSWAP_LIMB)
3501 #define BSWAP_LIMB(dst, src) \
3502 do { \
3503 mp_limb_t __bswapl_src = (src); \
3504 mp_limb_t __dstl = 0; \
3505 int __i; \
3506 for (__i = 0; __i < GMP_LIMB_BYTES; __i++) \
3508 __dstl = (__dstl << 8) | (__bswapl_src & 0xFF); \
3509 __bswapl_src >>= 8; \
3511 (dst) = __dstl; \
3512 } while (0)
3513 #endif
3516 /* Apparently lwbrx might be slow on some PowerPC chips, so restrict it to
3517 those we know are fast. */
3518 #if defined (__GNUC__) && ! defined (NO_ASM) \
3519 && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN \
3520 && (HAVE_HOST_CPU_powerpc604 \
3521 || HAVE_HOST_CPU_powerpc604e \
3522 || HAVE_HOST_CPU_powerpc750 \
3523 || HAVE_HOST_CPU_powerpc7400)
3524 #define BSWAP_LIMB_FETCH(limb, src) \
3525 do { \
3526 mp_srcptr __blf_src = (src); \
3527 mp_limb_t __limb; \
3528 __asm__ ("lwbrx %0, 0, %1" \
3529 : "=r" (__limb) \
3530 : "r" (__blf_src), \
3531 "m" (*__blf_src)); \
3532 (limb) = __limb; \
3533 } while (0)
3534 #endif
3536 #if ! defined (BSWAP_LIMB_FETCH)
3537 #define BSWAP_LIMB_FETCH(limb, src) BSWAP_LIMB (limb, *(src))
3538 #endif
3541 /* On the same basis that lwbrx might be slow, restrict stwbrx to those we
3542 know are fast. FIXME: Is this necessary? */
3543 #if defined (__GNUC__) && ! defined (NO_ASM) \
3544 && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN \
3545 && (HAVE_HOST_CPU_powerpc604 \
3546 || HAVE_HOST_CPU_powerpc604e \
3547 || HAVE_HOST_CPU_powerpc750 \
3548 || HAVE_HOST_CPU_powerpc7400)
3549 #define BSWAP_LIMB_STORE(dst, limb) \
3550 do { \
3551 mp_ptr __dst = (dst); \
3552 mp_limb_t __limb = (limb); \
3553 __asm__ ("stwbrx %1, 0, %2" \
3554 : "=m" (*__dst) \
3555 : "r" (__limb), \
3556 "r" (__dst)); \
3557 } while (0)
3558 #endif
3560 #if ! defined (BSWAP_LIMB_STORE)
3561 #define BSWAP_LIMB_STORE(dst, limb) BSWAP_LIMB (*(dst), limb)
3562 #endif
3565 /* Byte swap limbs from {src,size} and store at {dst,size}. */
3566 #define MPN_BSWAP(dst, src, size) \
3567 do { \
3568 mp_ptr __dst = (dst); \
3569 mp_srcptr __src = (src); \
3570 mp_size_t __size = (size); \
3571 mp_size_t __i; \
3572 ASSERT ((size) >= 0); \
3573 ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size)); \
3574 CRAY_Pragma ("_CRI ivdep"); \
3575 for (__i = 0; __i < __size; __i++) \
3577 BSWAP_LIMB_FETCH (*__dst, __src); \
3578 __dst++; \
3579 __src++; \
3581 } while (0)
3583 /* Byte swap limbs from {src,size} and store in reverse order at {dst,size}. */
3584 #define MPN_BSWAP_REVERSE(dst, src, size) \
3585 do { \
3586 mp_ptr __dst = (dst); \
3587 mp_size_t __size = (size); \
3588 mp_srcptr __src = (src) + __size - 1; \
3589 mp_size_t __i; \
3590 ASSERT ((size) >= 0); \
3591 ASSERT (! MPN_OVERLAP_P (dst, size, src, size)); \
3592 CRAY_Pragma ("_CRI ivdep"); \
3593 for (__i = 0; __i < __size; __i++) \
3595 BSWAP_LIMB_FETCH (*__dst, __src); \
3596 __dst++; \
3597 __src--; \
3599 } while (0)
3602 /* No processor claiming to be SPARC v9 compliant seems to
3603 implement the POPC instruction. Disable pattern for now. */
3604 #if 0
3605 #if defined __GNUC__ && defined __sparc_v9__ && GMP_LIMB_BITS == 64
3606 #define popc_limb(result, input) \
3607 do { \
3608 DItype __res; \
3609 __asm__ ("popc %1,%0" : "=r" (result) : "rI" (input)); \
3610 } while (0)
3611 #endif
3612 #endif
3614 #if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
3615 #define popc_limb(result, input) \
3616 do { \
3617 __asm__ ("ctpop %1, %0" : "=r" (result) : "r" (input)); \
3618 } while (0)
3619 #endif
3621 /* Cray intrinsic. */
3622 #ifdef _CRAY
3623 #define popc_limb(result, input) \
3624 do { \
3625 (result) = _popcnt (input); \
3626 } while (0)
3627 #endif
3629 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER) \
3630 && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3631 #define popc_limb(result, input) \
3632 do { \
3633 __asm__ ("popcnt %0 = %1" : "=r" (result) : "r" (input)); \
3634 } while (0)
3635 #endif
3637 /* Cool population count of an mp_limb_t.
3638    You have to figure out how this works; we won't tell you!
3640 The constants could also be expressed as:
3641 0x55... = [2^N / 3] = [(2^N-1)/3]
3642 0x33... = [2^N / 5] = [(2^N-1)/5]
3643 0x0f... = [2^N / 17] = [(2^N-1)/17]
3644 (N is GMP_LIMB_BITS, [] denotes truncation.) */
3646 #if ! defined (popc_limb) && GMP_LIMB_BITS == 8
3647 #define popc_limb(result, input) \
3648 do { \
3649 mp_limb_t __x = (input); \
3650 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3651 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3652 __x = ((__x >> 4) + __x); \
3653 (result) = __x & 0x0f; \
3654 } while (0)
3655 #endif
3657 #if ! defined (popc_limb) && GMP_LIMB_BITS == 16
3658 #define popc_limb(result, input) \
3659 do { \
3660 mp_limb_t __x = (input); \
3661 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3662 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3663 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3664 __x = ((__x >> 8) + __x); \
3665 (result) = __x & 0xff; \
3666 } while (0)
3667 #endif
3669 #if ! defined (popc_limb) && GMP_LIMB_BITS == 32
3670 #define popc_limb(result, input) \
3671 do { \
3672 mp_limb_t __x = (input); \
3673 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3674 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3675 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3676 __x = ((__x >> 8) + __x); \
3677 __x = ((__x >> 16) + __x); \
3678 (result) = __x & 0xff; \
3679 } while (0)
3680 #endif
3682 #if ! defined (popc_limb) && GMP_LIMB_BITS == 64
3683 #define popc_limb(result, input) \
3684 do { \
3685 mp_limb_t __x = (input); \
3686 __x -= (__x >> 1) & MP_LIMB_T_MAX/3; \
3687 __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5); \
3688 __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17; \
3689 __x = ((__x >> 8) + __x); \
3690 __x = ((__x >> 16) + __x); \
3691 __x = ((__x >> 32) + __x); \
3692 (result) = __x & 0xff; \
3693 } while (0)
3694 #endif
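/* Editorial note (not compiled): the masked steps above are the usual
   bit-slice population count, summing bit pairs, then nibbles, then
   bytes.  A hypothetical naive reference for cross-checking:  */
#if 0
static unsigned
example_popcount_ref (mp_limb_t x)
{
  unsigned c;
  for (c = 0; x != 0; x >>= 1)
    c += (unsigned) (x & 1);
  return c;
}
#endif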
3697 /* Define stuff for longlong.h. */
3698 #if HAVE_ATTRIBUTE_MODE
3699 typedef unsigned int UQItype __attribute__ ((mode (QI)));
3700 typedef int SItype __attribute__ ((mode (SI)));
3701 typedef unsigned int USItype __attribute__ ((mode (SI)));
3702 typedef int DItype __attribute__ ((mode (DI)));
3703 typedef unsigned int UDItype __attribute__ ((mode (DI)));
3704 #else
3705 typedef unsigned char UQItype;
3706 typedef long SItype;
3707 typedef unsigned long USItype;
3708 #if HAVE_LONG_LONG
3709 typedef long long int DItype;
3710 typedef unsigned long long int UDItype;
3711 #else /* Assume `long' gives us a wide enough type. Needed for hppa2.0w. */
3712 typedef long int DItype;
3713 typedef unsigned long int UDItype;
3714 #endif
3715 #endif
3717 typedef mp_limb_t UWtype;
3718 typedef unsigned int UHWtype;
3719 #define W_TYPE_SIZE GMP_LIMB_BITS
3721 /* Define ieee_double_extract and _GMP_IEEE_FLOATS.
3723 Bit field packing is "implementation defined" according to C99, which
3724 leaves us at the compiler's mercy here. For some systems packing is
3725 defined in the ABI (eg. x86). In any case so far it seems universal that
3726 little endian systems pack from low to high, and big endian from high to
3727 low within the given type.
3729 Within the fields we rely on the integer endianness being the same as the
3730 float endianness, this is true everywhere we know of and it'd be a fairly
3731 strange system that did anything else. */
3733 #if HAVE_DOUBLE_IEEE_LITTLE_SWAPPED
3734 #define _GMP_IEEE_FLOATS 1
3735 union ieee_double_extract
3737 struct
3739 gmp_uint_least32_t manh:20;
3740 gmp_uint_least32_t exp:11;
3741 gmp_uint_least32_t sig:1;
3742 gmp_uint_least32_t manl:32;
3743 } s;
3744 double d;
3746 #endif
3748 #if HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
3749 #define _GMP_IEEE_FLOATS 1
3750 union ieee_double_extract
3752 struct
3754 gmp_uint_least32_t manl:32;
3755 gmp_uint_least32_t manh:20;
3756 gmp_uint_least32_t exp:11;
3757 gmp_uint_least32_t sig:1;
3758 } s;
3759 double d;
3761 #endif
3763 #if HAVE_DOUBLE_IEEE_BIG_ENDIAN
3764 #define _GMP_IEEE_FLOATS 1
3765 union ieee_double_extract
3767 struct
3769 gmp_uint_least32_t sig:1;
3770 gmp_uint_least32_t exp:11;
3771 gmp_uint_least32_t manh:20;
3772 gmp_uint_least32_t manl:32;
3773 } s;
3774 double d;
3776 #endif
3778 #if HAVE_DOUBLE_VAX_D
3779 union double_extract
3781 struct
3783 gmp_uint_least32_t man3:7; /* highest 7 bits */
3784 gmp_uint_least32_t exp:8; /* excess-128 exponent */
3785 gmp_uint_least32_t sig:1;
3786 gmp_uint_least32_t man2:16;
3787 gmp_uint_least32_t man1:16;
3788 gmp_uint_least32_t man0:16; /* lowest 16 bits */
3789 } s;
3790 double d;
3792 #endif
3794 /* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
3795 that don't convert ulong->double correctly (eg. SunOS 4 native cc). */
3796 #define MP_BASE_AS_DOUBLE (4.0 * ((mp_limb_t) 1 << (GMP_NUMB_BITS - 2)))
3797 /* Maximum number of limbs it will take to store any `double'.
3798 We assume doubles have 53 mantissa bits. */
3799 #define LIMBS_PER_DOUBLE ((53 + GMP_NUMB_BITS - 2) / GMP_NUMB_BITS + 1)
3801 __GMP_DECLSPEC int __gmp_extract_double (mp_ptr, double);
3803 #define mpn_get_d __gmpn_get_d
3804 __GMP_DECLSPEC double mpn_get_d (mp_srcptr, mp_size_t, mp_size_t, long) __GMP_ATTRIBUTE_PURE;
3807 /* DOUBLE_NAN_INF_ACTION executes code a_nan if x is a NaN, or executes
3808 a_inf if x is an infinity. Both are considered unlikely values, for
3809 branch prediction. */
3811 #if _GMP_IEEE_FLOATS
3812 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3813 do { \
3814 union ieee_double_extract u; \
3815 u.d = (x); \
3816 if (UNLIKELY (u.s.exp == 0x7FF)) \
3818 if (u.s.manl == 0 && u.s.manh == 0) \
3819 { a_inf; } \
3820 else \
3821 { a_nan; } \
3823 } while (0)
3824 #endif
3826 #if HAVE_DOUBLE_VAX_D || HAVE_DOUBLE_VAX_G || HAVE_DOUBLE_CRAY_CFP
3827 /* no nans or infs in these formats */
3828 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3829 do { } while (0)
3830 #endif
3832 #ifndef DOUBLE_NAN_INF_ACTION
3833 /* Unknown format, try something generic.
3834 NaN should be "unordered", so x!=x.
3835 Inf should be bigger than DBL_MAX. */
3836 #define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf) \
3837 do { \
3839 if (UNLIKELY ((x) != (x))) \
3840 { a_nan; } \
3841 else if (UNLIKELY ((x) > DBL_MAX || (x) < -DBL_MAX)) \
3842 { a_inf; } \
3844 } while (0)
3845 #endif
3847 /* On m68k, x86 and amd64, gcc (and maybe other compilers) can hold doubles
3848 in the coprocessor, which means a bigger exponent range than normal, and
3849 depending on the rounding mode, a bigger mantissa than normal. (See
3850 "Disappointments" in the gcc manual.) FORCE_DOUBLE stores and fetches
3851 "d" through memory to force any rounding and overflows to occur.
3853 On amd64, and on x86s with SSE2, gcc (depending on options) uses the xmm
3854 registers, where there's no such extra precision and no need for the
3855 FORCE_DOUBLE. We don't bother to detect this since the present uses for
3856 FORCE_DOUBLE are only in test programs and default generic C code.
3858 Not quite sure that an "automatic volatile" will use memory, but it does
3859 in gcc. An asm("":"=m"(d):"0"(d)) can't be used to trick gcc, since
3860 apparently matching operands like "0" are only allowed on a register
3861 output. gcc 3.4 warns about this, though in fact it and past versions
3862 seem to put the operand through memory as hoped. */
3864 #if (HAVE_HOST_CPU_FAMILY_m68k || HAVE_HOST_CPU_FAMILY_x86 \
3865 || defined (__amd64__))
3866 #define FORCE_DOUBLE(d) \
3867 do { volatile double __gmp_force = (d); (d) = __gmp_force; } while (0)
3868 #else
3869 #define FORCE_DOUBLE(d) do { } while (0)
3870 #endif
3873 __GMP_DECLSPEC extern const unsigned char __gmp_digit_value_tab[];
3875 __GMP_DECLSPEC extern int __gmp_junk;
3876 __GMP_DECLSPEC extern const int __gmp_0;
3877 __GMP_DECLSPEC void __gmp_exception (int) ATTRIBUTE_NORETURN;
3878 __GMP_DECLSPEC void __gmp_divide_by_zero (void) ATTRIBUTE_NORETURN;
3879 __GMP_DECLSPEC void __gmp_sqrt_of_negative (void) ATTRIBUTE_NORETURN;
3880 __GMP_DECLSPEC void __gmp_invalid_operation (void) ATTRIBUTE_NORETURN;
3881 #define GMP_ERROR(code) __gmp_exception (code)
3882 #define DIVIDE_BY_ZERO __gmp_divide_by_zero ()
3883 #define SQRT_OF_NEGATIVE __gmp_sqrt_of_negative ()
3885 #if defined _LONG_LONG_LIMB
3886 #define CNST_LIMB(C) ((mp_limb_t) C##LL)
3887 #else /* not _LONG_LONG_LIMB */
3888 #define CNST_LIMB(C) ((mp_limb_t) C##L)
3889 #endif /* _LONG_LONG_LIMB */
3891 /* Stuff used by mpn/generic/perfsqr.c and mpz/prime_p.c */
3892 #if GMP_NUMB_BITS == 2
3893 #define PP 0x3 /* 3 */
3894 #define PP_FIRST_OMITTED 5
3895 #endif
3896 #if GMP_NUMB_BITS == 4
3897 #define PP 0xF /* 3 x 5 */
3898 #define PP_FIRST_OMITTED 7
3899 #endif
3900 #if GMP_NUMB_BITS == 8
3901 #define PP 0x69 /* 3 x 5 x 7 */
3902 #define PP_FIRST_OMITTED 11
3903 #endif
3904 #if GMP_NUMB_BITS == 16
3905 #define PP 0x3AA7 /* 3 x 5 x 7 x 11 x 13 */
3906 #define PP_FIRST_OMITTED 17
3907 #endif
3908 #if GMP_NUMB_BITS == 32
3909 #define PP 0xC0CFD797L /* 3 x 5 x 7 x 11 x ... x 29 */
3910 #define PP_INVERTED 0x53E5645CL
3911 #define PP_FIRST_OMITTED 31
3912 #endif
3913 #if GMP_NUMB_BITS == 64
3914 #define PP CNST_LIMB(0xE221F97C30E94E1D) /* 3 x 5 x 7 x 11 x ... x 53 */
3915 #define PP_INVERTED CNST_LIMB(0x21CFE6CFC938B36B)
3916 #define PP_FIRST_OMITTED 59
3917 #endif
3918 #ifndef PP_FIRST_OMITTED
3919 #define PP_FIRST_OMITTED 3
3920 #endif
3922 /* BIT1 means a result value in bit 1 (second least significant bit), with a
3923 zero bit representing +1 and a one bit representing -1. Bits other than
3924 bit 1 are garbage. These are meant to be kept in "int"s, and casts are
3925 used to ensure the expressions are "int"s even if a and/or b might be
3926 other types.
3928 JACOBI_TWOS_U_BIT1 and JACOBI_RECIP_UU_BIT1 are used in mpn_jacobi_base
3929 and their speed is important. Expressions are used rather than
3930 conditionals to accumulate sign changes, which effectively means XORs
3931 instead of conditional JUMPs. */
3933 /* (a/0), with a signed; is 1 if a=+/-1, 0 otherwise */
3934 #define JACOBI_S0(a) (((a) == 1) | ((a) == -1))
3936 /* (a/0), with a unsigned; is 1 if a=+/-1, 0 otherwise */
3937 #define JACOBI_U0(a) ((a) == 1)
3939 /* FIXME: JACOBI_LS0 and JACOBI_0LS are the same, so delete one and
3940 come up with a better name. */
3942 /* (a/0), with a given by low and size;
3943 is 1 if a=+/-1, 0 otherwise */
3944 #define JACOBI_LS0(alow,asize) \
3945 (((asize) == 1 || (asize) == -1) && (alow) == 1)
3947 /* (a/0), with a an mpz_t;
3948 fetch of low limb always valid, even if size is zero */
3949 #define JACOBI_Z0(a) JACOBI_LS0 (PTR(a)[0], SIZ(a))
3951 /* (0/b), with b unsigned; is 1 if b=1, 0 otherwise */
3952 #define JACOBI_0U(b) ((b) == 1)
3954 /* (0/b), with b unsigned; is 1 if b=+/-1, 0 otherwise */
3955 #define JACOBI_0S(b) ((b) == 1 || (b) == -1)
3957 /* (0/b), with b given by low and size; is 1 if b=+/-1, 0 otherwise */
3958 #define JACOBI_0LS(blow,bsize) \
3959 (((bsize) == 1 || (bsize) == -1) && (blow) == 1)
3961 /* Convert a bit1 to +1 or -1. */
3962 #define JACOBI_BIT1_TO_PN(result_bit1) \
3963 (1 - ((int) (result_bit1) & 2))
3965 /* (2/b), with b unsigned and odd;
3966 is (-1)^((b^2-1)/8) which is 1 if b==1,7mod8 or -1 if b==3,5mod8 and
3967 hence obtained from (b>>1)^b */
3968 #define JACOBI_TWO_U_BIT1(b) \
3969 ((int) (((b) >> 1) ^ (b)))
3971 /* (2/b)^twos, with b unsigned and odd */
3972 #define JACOBI_TWOS_U_BIT1(twos, b) \
3973 ((int) ((twos) << 1) & JACOBI_TWO_U_BIT1 (b))
3975 /* (2/b)^twos, with b unsigned and odd */
3976 #define JACOBI_TWOS_U(twos, b) \
3977 (JACOBI_BIT1_TO_PN (JACOBI_TWOS_U_BIT1 (twos, b)))
3979 /* (-1/b), with b odd (signed or unsigned);
3980 is (-1)^((b-1)/2) */
3981 #define JACOBI_N1B_BIT1(b) \
3982 ((int) (b))
3984 /* (a/b) effect due to sign of a: signed/unsigned, b odd;
3985 is (-1/b) if a<0, or +1 if a>=0 */
3986 #define JACOBI_ASGN_SU_BIT1(a, b) \
3987 ((((a) < 0) << 1) & JACOBI_N1B_BIT1(b))
3989 /* (a/b) effect due to sign of b: signed/signed;
3990 is -1 if a and b both negative, +1 otherwise */
3991 #define JACOBI_BSGN_SS_BIT1(a, b) \
3992 ((((a)<0) & ((b)<0)) << 1)
3994 /* (a/b) effect due to sign of b: signed/mpz;
3995 is -1 if a and b both negative, +1 otherwise */
3996 #define JACOBI_BSGN_SZ_BIT1(a, b) \
3997 JACOBI_BSGN_SS_BIT1 (a, SIZ(b))
3999 /* (a/b) effect due to sign of b: mpz/signed;
4000 is -1 if a and b both negative, +1 otherwise */
4001 #define JACOBI_BSGN_ZS_BIT1(a, b) \
4002 JACOBI_BSGN_SZ_BIT1 (b, a)
4004 /* (a/b) reciprocity to switch to (b/a), a,b both unsigned and odd;
4005 is (-1)^((a-1)*(b-1)/4), which means +1 if either a,b==1mod4, or -1 if
4006 both a,b==3mod4, achieved in bit 1 by a&b. No ASSERT()s about a,b odd
4007 because this is used in a couple of places with only bit 1 of a or b
4008 valid. */
4009 #define JACOBI_RECIP_UU_BIT1(a, b) \
4010 ((int) ((a) & (b)))
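/* Illustrative fragment (not compiled; an editorial example): sign and
   reciprocity factors accumulate as XORs in bit 1 and are mapped to +1/-1
   only at the end.  Hypothetical use, with b odd:  */
#if 0
static int
example_jacobi_sign_factor (long a, unsigned long b)
{
  int result_bit1 = 0;
  result_bit1 ^= JACOBI_ASGN_SU_BIT1 (a, b);  /* (-1/b) factor when a < 0 */
  /* ... further factors would be XORed in here, e.g. from
     JACOBI_TWOS_U_BIT1 and JACOBI_RECIP_UU_BIT1 ... */
  return JACOBI_BIT1_TO_PN (result_bit1);
}
#endif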
4012 /* Strip low zero limbs from {b_ptr,b_size} by incrementing b_ptr and
4013 decrementing b_size. b_low should be b_ptr[0] on entry, and will be
4014 updated for the new b_ptr. result_bit1 is updated according to the
4015 factors of 2 stripped, as per (a/2). */
4016 #define JACOBI_STRIP_LOW_ZEROS(result_bit1, a, b_ptr, b_size, b_low) \
4017 do { \
4018 ASSERT ((b_size) >= 1); \
4019 ASSERT ((b_low) == (b_ptr)[0]); \
4021 while (UNLIKELY ((b_low) == 0)) \
4023 (b_size)--; \
4024 ASSERT ((b_size) >= 1); \
4025 (b_ptr)++; \
4026 (b_low) = *(b_ptr); \
4028 ASSERT (((a) & 1) != 0); \
4029 if ((GMP_NUMB_BITS % 2) == 1) \
4030 (result_bit1) ^= JACOBI_TWO_U_BIT1(a); \
4032 } while (0)
4034 /* Set a_rem to {a_ptr,a_size} reduced modulo b, either using mod_1 or
4035 modexact_1_odd, but in either case leaving a_rem<b. b must be odd and
4036 unsigned. modexact_1_odd effectively calculates -a mod b, and
4037 result_bit1 is adjusted for the factor of -1.
4039 The way mpn_modexact_1_odd sometimes bases its remainder on a_size and
4040 sometimes on a_size-1 means if GMP_NUMB_BITS is odd we can't know what
4041 factor to introduce into result_bit1, so for that case use mpn_mod_1
4042 unconditionally.
4044 FIXME: mpn_modexact_1_odd is more efficient, so some way to get it used
4045 for odd GMP_NUMB_BITS would be good. Perhaps it could mung its result,
4046 or not skip a divide step, or something. */
4048 #define JACOBI_MOD_OR_MODEXACT_1_ODD(result_bit1, a_rem, a_ptr, a_size, b) \
4049 do { \
4050 mp_srcptr __a_ptr = (a_ptr); \
4051 mp_size_t __a_size = (a_size); \
4052 mp_limb_t __b = (b); \
4054 ASSERT (__a_size >= 1); \
4055 ASSERT (__b & 1); \
4057 if ((GMP_NUMB_BITS % 2) != 0 \
4058 || ABOVE_THRESHOLD (__a_size, BMOD_1_TO_MOD_1_THRESHOLD)) \
4060 (a_rem) = mpn_mod_1 (__a_ptr, __a_size, __b); \
4062 else \
4064 (result_bit1) ^= JACOBI_N1B_BIT1 (__b); \
4065 (a_rem) = mpn_modexact_1_odd (__a_ptr, __a_size, __b); \
4067 } while (0)
4069 /* State for the Jacobi computation using Lehmer. */
4070 #define jacobi_table __gmp_jacobi_table
4071 __GMP_DECLSPEC extern const unsigned char jacobi_table[208];
4073 /* Bit layout for the initial state. b must be odd.
4075 3 2 1 0
4076 +--+--+--+--+
4077 |a1|a0|b1| s|
4078 +--+--+--+--+
4081 static inline unsigned
4082 mpn_jacobi_init (unsigned a, unsigned b, unsigned s)
4084 ASSERT (b & 1);
4085 ASSERT (s <= 1);
4086 return ((a & 3) << 2) + (b & 2) + s;
4089 static inline int
4090 mpn_jacobi_finish (unsigned bits)
4092 /* (a, b) = (1,0) or (0,1) */
4093 ASSERT ( (bits & 14) == 0);
4095 return 1-2*(bits & 1);
4098 static inline unsigned
4099 mpn_jacobi_update (unsigned bits, unsigned denominator, unsigned q)
4101 /* FIXME: Could halve table size by not including the e bit in the
4102 * index, and instead xor when updating. Then the lookup would be
4103 * like
4105 * bits ^= table[((bits & 30) << 2) + (denominator << 2) + q];
4108 ASSERT (bits < 26);
4109 ASSERT (denominator < 2);
4110 ASSERT (q < 4);
4112 /* For almost all calls, denominator is constant and quite often q
4113 is constant too. So use addition rather than or, so the compiler
4114      can put the constant part into the offset of an indexed
4115 addressing instruction.
4117 With constant denominator, the below table lookup is compiled to
4119 C Constant q = 1, constant denominator = 1
4120 movzbl table+5(%eax,8), %eax
4124 C q in %edx, constant denominator = 1
4125 movzbl table+4(%edx,%eax,8), %eax
4127 One could maintain the state preshifted 3 bits, to save a shift
4128 here, but at least on x86, that's no real saving.
4130 return bits = jacobi_table[(bits << 3) + (denominator << 2) + q];
4133 /* Matrix multiplication */
4134 #define mpn_matrix22_mul __MPN(matrix22_mul)
4135 __GMP_DECLSPEC void mpn_matrix22_mul (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
4136 #define mpn_matrix22_mul_strassen __MPN(matrix22_mul_strassen)
4137 __GMP_DECLSPEC void mpn_matrix22_mul_strassen (mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr);
4138 #define mpn_matrix22_mul_itch __MPN(matrix22_mul_itch)
4139 __GMP_DECLSPEC mp_size_t mpn_matrix22_mul_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
4141 #ifndef MATRIX22_STRASSEN_THRESHOLD
4142 #define MATRIX22_STRASSEN_THRESHOLD 30
4143 #endif
4145 /* HGCD definitions */
4147 /* Extract one numb, shifting count bits left
4148 ________ ________
4149 |___xh___||___xl___|
4150 |____r____|
4151 >count <
4153 The count includes any nail bits, so it should work fine if count
4154 is computed using count_leading_zeros. If GMP_NAIL_BITS > 0, all of
4155 xh, xl and r include nail bits. Must have 0 < count < GMP_LIMB_BITS.
4157 FIXME: Omit masking with GMP_NUMB_MASK, and let callers do that for
4158 those calls where the count high bits of xh may be non-zero.
4161 #define MPN_EXTRACT_NUMB(count, xh, xl) \
4162 ((((xh) << ((count) - GMP_NAIL_BITS)) & GMP_NUMB_MASK) | \
4163 ((xl) >> (GMP_LIMB_BITS - (count))))
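/* Illustrative fragment (not compiled; an editorial example): fetching
   the most significant numb of a nonzero two-limb value, in the style of
   the hgcd code.  count_leading_zeros supplies the shift; the cnt == 0
   case is handled separately since the macro needs 0 < count.  */
#if 0
static mp_limb_t
example_extract_high_numb (mp_limb_t xh, mp_limb_t xl)
{
  int cnt;
  ASSERT (xh != 0);
  count_leading_zeros (cnt, xh);
  if (cnt == 0)
    return xh & GMP_NUMB_MASK;
  return MPN_EXTRACT_NUMB (cnt, xh, xl);
}
#endif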
4166 /* The non-negative matrix M = (u, u'; v,v') keeps track of the
4167 reduction (a;b) = M (alpha; beta) where alpha, beta are smaller
4168 than a, b. The determinant must always be one, so that M has an
4169 inverse (v', -u'; -v, u). Elements always fit in GMP_NUMB_BITS - 1
4170 bits. */
4171 struct hgcd_matrix1
4173 mp_limb_t u[2][2];
4176 #define mpn_hgcd2 __MPN (hgcd2)
4177 __GMP_DECLSPEC int mpn_hgcd2 (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *);
4179 #define mpn_hgcd_mul_matrix1_vector __MPN (hgcd_mul_matrix1_vector)
4180 __GMP_DECLSPEC mp_size_t mpn_hgcd_mul_matrix1_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);
4182 #define mpn_matrix22_mul1_inverse_vector __MPN (matrix22_mul1_inverse_vector)
4183 __GMP_DECLSPEC mp_size_t mpn_matrix22_mul1_inverse_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);
4185 #define mpn_hgcd2_jacobi __MPN (hgcd2_jacobi)
4186 __GMP_DECLSPEC int mpn_hgcd2_jacobi (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *, unsigned *);
4188 struct hgcd_matrix
4190 mp_size_t alloc; /* for sanity checking only */
4191 mp_size_t n;
4192 mp_ptr p[2][2];
4195 #define MPN_HGCD_MATRIX_INIT_ITCH(n) (4 * ((n+1)/2 + 1))
4197 #define mpn_hgcd_matrix_init __MPN (hgcd_matrix_init)
4198 __GMP_DECLSPEC void mpn_hgcd_matrix_init (struct hgcd_matrix *, mp_size_t, mp_ptr);
4200 #define mpn_hgcd_matrix_update_q __MPN (hgcd_matrix_update_q)
4201 __GMP_DECLSPEC void mpn_hgcd_matrix_update_q (struct hgcd_matrix *, mp_srcptr, mp_size_t, unsigned, mp_ptr);
4203 #define mpn_hgcd_matrix_mul_1 __MPN (hgcd_matrix_mul_1)
4204 __GMP_DECLSPEC void mpn_hgcd_matrix_mul_1 (struct hgcd_matrix *, const struct hgcd_matrix1 *, mp_ptr);
4206 #define mpn_hgcd_matrix_mul __MPN (hgcd_matrix_mul)
4207 __GMP_DECLSPEC void mpn_hgcd_matrix_mul (struct hgcd_matrix *, const struct hgcd_matrix *, mp_ptr);
4209 #define mpn_hgcd_matrix_adjust __MPN (hgcd_matrix_adjust)
4210 __GMP_DECLSPEC mp_size_t mpn_hgcd_matrix_adjust (const struct hgcd_matrix *, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
4212 #define mpn_hgcd_step __MPN(hgcd_step)
4213 __GMP_DECLSPEC mp_size_t mpn_hgcd_step (mp_size_t, mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4215 #define mpn_hgcd_reduce __MPN(hgcd_reduce)
4216 __GMP_DECLSPEC mp_size_t mpn_hgcd_reduce (struct hgcd_matrix *, mp_ptr, mp_ptr, mp_size_t, mp_size_t, mp_ptr);
4218 #define mpn_hgcd_reduce_itch __MPN(hgcd_reduce_itch)
4219 __GMP_DECLSPEC mp_size_t mpn_hgcd_reduce_itch (mp_size_t, mp_size_t) ATTRIBUTE_CONST;
4221 #define mpn_hgcd_itch __MPN (hgcd_itch)
4222 __GMP_DECLSPEC mp_size_t mpn_hgcd_itch (mp_size_t) ATTRIBUTE_CONST;
4224 #define mpn_hgcd __MPN (hgcd)
4225 __GMP_DECLSPEC mp_size_t mpn_hgcd (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4227 #define mpn_hgcd_appr_itch __MPN (hgcd_appr_itch)
4228 __GMP_DECLSPEC mp_size_t mpn_hgcd_appr_itch (mp_size_t) ATTRIBUTE_CONST;
4230 #define mpn_hgcd_appr __MPN (hgcd_appr)
4231 __GMP_DECLSPEC int mpn_hgcd_appr (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);
4233 #define mpn_hgcd_jacobi __MPN (hgcd_jacobi)
4234 __GMP_DECLSPEC mp_size_t mpn_hgcd_jacobi (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, unsigned *, mp_ptr);
4236 typedef void gcd_subdiv_step_hook(void *, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int);
4238 /* Needs storage for the quotient */
4239 #define MPN_GCD_SUBDIV_STEP_ITCH(n) (n)
4241 #define mpn_gcd_subdiv_step __MPN(gcd_subdiv_step)
4242 __GMP_DECLSPEC mp_size_t mpn_gcd_subdiv_step (mp_ptr, mp_ptr, mp_size_t, mp_size_t, gcd_subdiv_step_hook *, void *, mp_ptr);
4244 struct gcdext_ctx
4246 /* Result parameters. */
4247 mp_ptr gp;
4248 mp_size_t gn;
4249 mp_ptr up;
4250 mp_size_t *usize;
4252 /* Cofactors updated in each step. */
4253 mp_size_t un;
4254 mp_ptr u0, u1, tp;
4257 #define mpn_gcdext_hook __MPN (gcdext_hook)
4258 gcd_subdiv_step_hook mpn_gcdext_hook;
4260 #define MPN_GCDEXT_LEHMER_N_ITCH(n) (4*(n) + 3)
4262 #define mpn_gcdext_lehmer_n __MPN(gcdext_lehmer_n)
4263 __GMP_DECLSPEC mp_size_t mpn_gcdext_lehmer_n (mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr);
4265 /* 4*(an + 1) + 4*(bn + 1) + an */
4266 #define MPN_GCDEXT_LEHMER_ITCH(an, bn) (5*(an) + 4*(bn) + 8)
4268 #ifndef HGCD_THRESHOLD
4269 #define HGCD_THRESHOLD 400
4270 #endif
4272 #ifndef HGCD_APPR_THRESHOLD
4273 #define HGCD_APPR_THRESHOLD 400
4274 #endif
4276 #ifndef HGCD_REDUCE_THRESHOLD
4277 #define HGCD_REDUCE_THRESHOLD 1000
4278 #endif
4280 #ifndef GCD_DC_THRESHOLD
4281 #define GCD_DC_THRESHOLD 1000
4282 #endif
4284 #ifndef GCDEXT_DC_THRESHOLD
4285 #define GCDEXT_DC_THRESHOLD 600
4286 #endif
4288 /* Definitions for mpn_set_str and mpn_get_str */
4289 struct powers
4291 mp_ptr p; /* actual power value */
4292 mp_size_t n; /* # of limbs at p */
4293 mp_size_t shift; /* weight of lowest limb, in limb base B */
4294 size_t digits_in_base; /* number of corresponding digits */
4295 int base;
4297 typedef struct powers powers_t;
4298 #define mpn_dc_set_str_powtab_alloc(n) ((n) + GMP_LIMB_BITS)
4299 #define mpn_dc_set_str_itch(n) ((n) + GMP_LIMB_BITS)
4300 #define mpn_dc_get_str_powtab_alloc(n) ((n) + 2 * GMP_LIMB_BITS)
4301 #define mpn_dc_get_str_itch(n) ((n) + GMP_LIMB_BITS)
4303 #define mpn_dc_set_str __MPN(dc_set_str)
4304 __GMP_DECLSPEC mp_size_t mpn_dc_set_str (mp_ptr, const unsigned char *, size_t, const powers_t *, mp_ptr);
4305 #define mpn_bc_set_str __MPN(bc_set_str)
4306 __GMP_DECLSPEC mp_size_t mpn_bc_set_str (mp_ptr, const unsigned char *, size_t, int);
4307 #define mpn_set_str_compute_powtab __MPN(set_str_compute_powtab)
4308 __GMP_DECLSPEC void mpn_set_str_compute_powtab (powers_t *, mp_ptr, mp_size_t, int);
4311 /* __GMPF_BITS_TO_PREC applies a minimum 53 bits, rounds upwards to a whole
4312 limb and adds an extra limb. __GMPF_PREC_TO_BITS drops that extra limb,
4313 hence giving back the user's size in bits rounded up. Notice that
4314 converting prec->bits->prec gives an unchanged value. */
4315 #define __GMPF_BITS_TO_PREC(n) \
4316 ((mp_size_t) ((__GMP_MAX (53, n) + 2 * GMP_NUMB_BITS - 1) / GMP_NUMB_BITS))
4317 #define __GMPF_PREC_TO_BITS(n) \
4318 ((mp_bitcnt_t) (n) * GMP_NUMB_BITS - GMP_NUMB_BITS)
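/* Worked example (editorial), assuming GMP_NUMB_BITS == 64:
   __GMPF_BITS_TO_PREC(100) = (100 + 127) / 64 = 3 limbs, and
   __GMPF_PREC_TO_BITS(3) = 3*64 - 64 = 128 bits, so a request for 100
   bits is rounded up to 128 and prec->bits->prec round-trips unchanged.  */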
4320 __GMP_DECLSPEC extern mp_size_t __gmp_default_fp_limb_precision;
4322 /* Compute the number of base-b digits corresponding to nlimbs limbs, rounding
4323 down. */
4324 #define DIGITS_IN_BASE_PER_LIMB(res, nlimbs, b) \
4325 do { \
4326 mp_limb_t _ph, _dummy; \
4327 umul_ppmm (_ph, _dummy, \
4328 mp_bases[b].logb2, GMP_NUMB_BITS * (mp_limb_t) (nlimbs));\
4329 res = _ph; \
4330 } while (0)
4332 /* Compute the number of limbs corresponding to ndigits base-b digits, rounding
4333 up. */
4334 #define LIMBS_PER_DIGIT_IN_BASE(res, ndigits, b) \
4335 do { \
4336 mp_limb_t _ph, _dummy; \
4337 umul_ppmm (_ph, _dummy, mp_bases[b].log2b, (mp_limb_t) (ndigits)); \
4338 res = 8 * _ph / GMP_NUMB_BITS + 2; \
4339 } while (0)
4342 /* Set n to the number of significant digits an mpf of the given _mp_prec
4343 field, in the given base. This is a rounded up value, designed to ensure
4344 there's enough digits to reproduce all the guaranteed part of the value.
4346 There are prec many limbs, but the high might be only "1" so forget it
4347 and just count prec-1 limbs into chars. +1 rounds that upwards, and a
4348 further +1 is because the limbs usually won't fall on digit boundaries.
4350 FIXME: If base is a power of 2 and the bits per digit divides
4351 GMP_LIMB_BITS then the +2 is unnecessary. This happens always for
4352 base==2, and in base==16 with the current 32 or 64 bit limb sizes. */
4354 #define MPF_SIGNIFICANT_DIGITS(n, base, prec) \
4355 do { \
4356 size_t rawn; \
4357 ASSERT (base >= 2 && base < numberof (mp_bases)); \
4358 DIGITS_IN_BASE_PER_LIMB (rawn, (prec) - 1, base); \
4359 n = rawn + 2; \
4360 } while (0)
4363 /* Decimal point string, from the current C locale. Needs <langinfo.h> for
4364 nl_langinfo and constants, preferably with _GNU_SOURCE defined to get
4365 DECIMAL_POINT from glibc, and needs <locale.h> for localeconv, each under
4366 their respective #if HAVE_FOO_H.
4368 GLIBC recommends nl_langinfo because getting only one facet can be
4369 faster, apparently. */
4371 /* DECIMAL_POINT seems to need _GNU_SOURCE defined to get it from glibc. */
4372 #if HAVE_NL_LANGINFO && defined (DECIMAL_POINT)
4373 #define GMP_DECIMAL_POINT (nl_langinfo (DECIMAL_POINT))
4374 #endif
4375 /* RADIXCHAR is deprecated, still in unix98 or some such. */
4376 #if HAVE_NL_LANGINFO && defined (RADIXCHAR) && ! defined (GMP_DECIMAL_POINT)
4377 #define GMP_DECIMAL_POINT (nl_langinfo (RADIXCHAR))
4378 #endif
4379 /* localeconv is slower since it returns all locale stuff */
4380 #if HAVE_LOCALECONV && ! defined (GMP_DECIMAL_POINT)
4381 #define GMP_DECIMAL_POINT (localeconv()->decimal_point)
4382 #endif
4383 #if ! defined (GMP_DECIMAL_POINT)
4384 #define GMP_DECIMAL_POINT (".")
4385 #endif
4388 #define DOPRNT_CONV_FIXED 1
4389 #define DOPRNT_CONV_SCIENTIFIC 2
4390 #define DOPRNT_CONV_GENERAL 3
4392 #define DOPRNT_JUSTIFY_NONE 0
4393 #define DOPRNT_JUSTIFY_LEFT 1
4394 #define DOPRNT_JUSTIFY_RIGHT 2
4395 #define DOPRNT_JUSTIFY_INTERNAL 3
4397 #define DOPRNT_SHOWBASE_YES 1
4398 #define DOPRNT_SHOWBASE_NO 2
4399 #define DOPRNT_SHOWBASE_NONZERO 3
4401 struct doprnt_params_t {
4402 int base; /* negative for upper case */
4403 int conv; /* choices above */
4404 const char *expfmt; /* exponent format */
4405 int exptimes4; /* exponent multiply by 4 */
4406 char fill; /* character */
4407 int justify; /* choices above */
4408 int prec; /* prec field, or -1 for all digits */
4409 int showbase; /* choices above */
4410 int showpoint; /* if radix point always shown */
4411 int showtrailing; /* if trailing zeros wanted */
4412 char sign; /* '+', ' ', or '\0' */
4413 int width; /* width field */
4416 #if _GMP_H_HAVE_VA_LIST
4418 typedef int (*doprnt_format_t) (void *, const char *, va_list);
4419 typedef int (*doprnt_memory_t) (void *, const char *, size_t);
4420 typedef int (*doprnt_reps_t) (void *, int, int);
4421 typedef int (*doprnt_final_t) (void *);
4423 struct doprnt_funs_t {
4424 doprnt_format_t format;
4425 doprnt_memory_t memory;
4426 doprnt_reps_t reps;
4427 doprnt_final_t final; /* NULL if not required */
4430 extern const struct doprnt_funs_t __gmp_fprintf_funs;
4431 extern const struct doprnt_funs_t __gmp_sprintf_funs;
4432 extern const struct doprnt_funs_t __gmp_snprintf_funs;
4433 extern const struct doprnt_funs_t __gmp_obstack_printf_funs;
4434 extern const struct doprnt_funs_t __gmp_ostream_funs;
4436 /* "buf" is a __gmp_allocate_func block of "alloc" many bytes. The first
4437 "size" of these have been written. "alloc > size" is maintained, so
4438 there's room to store a '\0' at the end. "result" is where the
4439 application wants the final block pointer. */
4440 struct gmp_asprintf_t {
4441 char **result;
4442 char *buf;
4443 size_t size;
4444 size_t alloc;
4447 #define GMP_ASPRINTF_T_INIT(d, output) \
4448 do { \
4449 (d).result = (output); \
4450 (d).alloc = 256; \
4451 (d).buf = (char *) (*__gmp_allocate_func) ((d).alloc); \
4452 (d).size = 0; \
4453 } while (0)
4455 /* If a realloc is necessary, use twice the size actually required, so as to
4456 avoid repeated small reallocs. */
4457 #define GMP_ASPRINTF_T_NEED(d, n) \
4458 do { \
4459 size_t alloc, newsize, newalloc; \
4460 ASSERT ((d)->alloc >= (d)->size + 1); \
4462 alloc = (d)->alloc; \
4463 newsize = (d)->size + (n); \
4464 if (alloc <= newsize) \
4466 newalloc = 2*newsize; \
4467 (d)->alloc = newalloc; \
4468 (d)->buf = __GMP_REALLOCATE_FUNC_TYPE ((d)->buf, \
4469 alloc, newalloc, char); \
4471 } while (0)
4473 __GMP_DECLSPEC int __gmp_asprintf_memory (struct gmp_asprintf_t *, const char *, size_t);
4474 __GMP_DECLSPEC int __gmp_asprintf_reps (struct gmp_asprintf_t *, int, int);
4475 __GMP_DECLSPEC int __gmp_asprintf_final (struct gmp_asprintf_t *);
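/* Illustrative sketch (not compiled; an editorial example): the life
   cycle of a gmp_asprintf_t as driven through the functions above.
   Hypothetical helper; assumes <string.h> for strlen.  */
#if 0
static void
example_asprintf_use (char **result, const char *str)
{
  struct gmp_asprintf_t d;
  GMP_ASPRINTF_T_INIT (d, result);                 /* 256-byte initial buf */
  __gmp_asprintf_memory (&d, str, strlen (str));   /* append the bytes */
  __gmp_asprintf_final (&d);                       /* terminate, set *result */
}
#endif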
4477 /* buf is where to write the next output, and size is how much space is left
4478 there. If the application passed size==0 then that's what we'll have
4479 here, and nothing at all should be written. */
4480 struct gmp_snprintf_t {
4481 char *buf;
4482 size_t size;
4485 /* Add the bytes printed by the call to the total retval, or bail out on an
4486 error. */
4487 #define DOPRNT_ACCUMULATE(call) \
4488 do { \
4489 int __ret; \
4490 __ret = call; \
4491 if (__ret == -1) \
4492 goto error; \
4493 retval += __ret; \
4494 } while (0)
4495 #define DOPRNT_ACCUMULATE_FUN(fun, params) \
4496 do { \
4497 ASSERT ((fun) != NULL); \
4498 DOPRNT_ACCUMULATE ((*(fun)) params); \
4499 } while (0)
4501 #define DOPRNT_FORMAT(fmt, ap) \
4502 DOPRNT_ACCUMULATE_FUN (funs->format, (data, fmt, ap))
4503 #define DOPRNT_MEMORY(ptr, len) \
4504 DOPRNT_ACCUMULATE_FUN (funs->memory, (data, ptr, len))
4505 #define DOPRNT_REPS(c, n) \
4506 DOPRNT_ACCUMULATE_FUN (funs->reps, (data, c, n))
4508 #define DOPRNT_STRING(str) DOPRNT_MEMORY (str, strlen (str))
4510 #define DOPRNT_REPS_MAYBE(c, n) \
4511 do { \
4512 if ((n) != 0) \
4513 DOPRNT_REPS (c, n); \
4514 } while (0)
4515 #define DOPRNT_MEMORY_MAYBE(ptr, len) \
4516 do { \
4517 if ((len) != 0) \
4518 DOPRNT_MEMORY (ptr, len); \
4519 } while (0)
4521 __GMP_DECLSPEC int __gmp_doprnt (const struct doprnt_funs_t *, void *, const char *, va_list);
4522 __GMP_DECLSPEC int __gmp_doprnt_integer (const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *);
4524 #define __gmp_doprnt_mpf __gmp_doprnt_mpf2
4525 __GMP_DECLSPEC int __gmp_doprnt_mpf (const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *, mpf_srcptr);
4527 __GMP_DECLSPEC int __gmp_replacement_vsnprintf (char *, size_t, const char *, va_list);
4528 #endif /* _GMP_H_HAVE_VA_LIST */
4531 typedef int (*gmp_doscan_scan_t) (void *, const char *, ...);
4532 typedef void *(*gmp_doscan_step_t) (void *, int);
4533 typedef int (*gmp_doscan_get_t) (void *);
4534 typedef int (*gmp_doscan_unget_t) (int, void *);
4536 struct gmp_doscan_funs_t {
4537 gmp_doscan_scan_t scan;
4538 gmp_doscan_step_t step;
4539 gmp_doscan_get_t get;
4540 gmp_doscan_unget_t unget;
4542 extern const struct gmp_doscan_funs_t __gmp_fscanf_funs;
4543 extern const struct gmp_doscan_funs_t __gmp_sscanf_funs;
4545 #if _GMP_H_HAVE_VA_LIST
4546 __GMP_DECLSPEC int __gmp_doscan (const struct gmp_doscan_funs_t *, void *, const char *, va_list);
4547 #endif
4550 /* For testing and debugging. */
4551 #define MPZ_CHECK_FORMAT(z) \
4552 do { \
4553 ASSERT_ALWAYS (SIZ(z) == 0 || PTR(z)[ABSIZ(z) - 1] != 0); \
4554 ASSERT_ALWAYS (ALLOC(z) >= ABSIZ(z)); \
4555 ASSERT_ALWAYS_MPN (PTR(z), ABSIZ(z)); \
4556 } while (0)
4558 #define MPQ_CHECK_FORMAT(q) \
4559 do { \
4560 MPZ_CHECK_FORMAT (mpq_numref (q)); \
4561 MPZ_CHECK_FORMAT (mpq_denref (q)); \
4562 ASSERT_ALWAYS (SIZ(mpq_denref(q)) >= 1); \
4564 if (SIZ(mpq_numref(q)) == 0) \
4566 /* should have zero as 0/1 */ \
4567 ASSERT_ALWAYS (SIZ(mpq_denref(q)) == 1 \
4568 && PTR(mpq_denref(q))[0] == 1); \
4570 else \
4572 /* should have no common factors */ \
4573 mpz_t g; \
4574 mpz_init (g); \
4575 mpz_gcd (g, mpq_numref(q), mpq_denref(q)); \
4576 ASSERT_ALWAYS (mpz_cmp_ui (g, 1) == 0); \
4577 mpz_clear (g); \
4579 } while (0)
4581 #define MPF_CHECK_FORMAT(f) \
4582 do { \
4583 ASSERT_ALWAYS (PREC(f) >= __GMPF_BITS_TO_PREC(53)); \
4584 ASSERT_ALWAYS (ABSIZ(f) <= PREC(f)+1); \
4585 if (SIZ(f) == 0) \
4586 ASSERT_ALWAYS (EXP(f) == 0); \
4587 if (SIZ(f) != 0) \
4588 ASSERT_ALWAYS (PTR(f)[ABSIZ(f) - 1] != 0); \
4589 } while (0)
4592 /* Enhancement: The "mod" and "gcd_1" functions below could have
4593 __GMP_ATTRIBUTE_PURE, but currently (gcc 3.3) that's not supported on
4594 function pointers, only actual functions. It probably doesn't make much
4595 difference to the gmp code, since hopefully we arrange calls so there's
4596 no great need for the compiler to move things around. */
4598 #if WANT_FAT_BINARY && (HAVE_HOST_CPU_FAMILY_x86 || HAVE_HOST_CPU_FAMILY_x86_64)
4599 /* NOTE: The function pointers in this struct are also in CPUVEC_FUNCS_LIST
4600 in mpn/x86/x86-defs.m4 and mpn/x86_64/x86_64-defs.m4. Be sure to update
4601 those when changing here. */
4602 struct cpuvec_t {
4603 DECL_add_n ((*add_n));
4604 DECL_addlsh1_n ((*addlsh1_n));
4605 DECL_addlsh2_n ((*addlsh2_n));
4606 DECL_addmul_1 ((*addmul_1));
4607 DECL_addmul_2 ((*addmul_2));
4608 DECL_bdiv_dbm1c ((*bdiv_dbm1c));
4609 DECL_cnd_add_n ((*cnd_add_n));
4610 DECL_cnd_sub_n ((*cnd_sub_n));
4611 DECL_com ((*com));
4612 DECL_copyd ((*copyd));
4613 DECL_copyi ((*copyi));
4614 DECL_divexact_1 ((*divexact_1));
4615 DECL_divrem_1 ((*divrem_1));
4616 DECL_gcd_1 ((*gcd_1));
4617 DECL_lshift ((*lshift));
4618 DECL_lshiftc ((*lshiftc));
4619 DECL_mod_1 ((*mod_1));
4620 DECL_mod_1_1p ((*mod_1_1p));
4621 DECL_mod_1_1p_cps ((*mod_1_1p_cps));
4622 DECL_mod_1s_2p ((*mod_1s_2p));
4623 DECL_mod_1s_2p_cps ((*mod_1s_2p_cps));
4624 DECL_mod_1s_4p ((*mod_1s_4p));
4625 DECL_mod_1s_4p_cps ((*mod_1s_4p_cps));
4626 DECL_mod_34lsub1 ((*mod_34lsub1));
4627 DECL_modexact_1c_odd ((*modexact_1c_odd));
4628 DECL_mul_1 ((*mul_1));
4629 DECL_mul_basecase ((*mul_basecase));
4630 DECL_mullo_basecase ((*mullo_basecase));
4631 DECL_preinv_divrem_1 ((*preinv_divrem_1));
4632 DECL_preinv_mod_1 ((*preinv_mod_1));
4633 DECL_redc_1 ((*redc_1));
4634 DECL_redc_2 ((*redc_2));
4635 DECL_rshift ((*rshift));
4636 DECL_sqr_basecase ((*sqr_basecase));
4637 DECL_sub_n ((*sub_n));
4638 DECL_sublsh1_n ((*sublsh1_n));
4639 DECL_submul_1 ((*submul_1));
4640 mp_size_t mul_toom22_threshold;
4641 mp_size_t mul_toom33_threshold;
4642 mp_size_t sqr_toom2_threshold;
4643 mp_size_t sqr_toom3_threshold;
4644 mp_size_t bmod_1_to_mod_1_threshold;
4646 __GMP_DECLSPEC extern struct cpuvec_t __gmpn_cpuvec;
4647 __GMP_DECLSPEC extern int __gmpn_cpuvec_initialized;
4648 #endif /* x86 fat binary */
4650 __GMP_DECLSPEC void __gmpn_cpuvec_init (void);
4652 /* Get a threshold "field" from __gmpn_cpuvec, running __gmpn_cpuvec_init()
4653 if that hasn't yet been done (to establish the right values). */
4654 #define CPUVEC_THRESHOLD(field) \
4655 ((LIKELY (__gmpn_cpuvec_initialized) ? 0 : (__gmpn_cpuvec_init (), 0)), \
4656 __gmpn_cpuvec.field)
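/* For instance (editorial, hypothetical use): a fat-binary build can pick
   code paths at run time with something like
     BELOW_THRESHOLD (n, CPUVEC_THRESHOLD (mul_toom22_threshold))
   so the first use triggers __gmpn_cpuvec_init and later uses are cheap.  */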
4659 #if HAVE_NATIVE_mpn_add_nc
4660 #define mpn_add_nc __MPN(add_nc)
4661 __GMP_DECLSPEC mp_limb_t mpn_add_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
4662 #else
4663 static inline
4664 mp_limb_t
4665 mpn_add_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4667 mp_limb_t co;
4668 co = mpn_add_n (rp, up, vp, n);
4669 co += mpn_add_1 (rp, rp, n, ci);
4670 return co;
4672 #endif
4674 #if HAVE_NATIVE_mpn_sub_nc
4675 #define mpn_sub_nc __MPN(sub_nc)
4676 __GMP_DECLSPEC mp_limb_t mpn_sub_nc (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
4677 #else
4678 static inline mp_limb_t
4679 mpn_sub_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4681 mp_limb_t co;
4682 co = mpn_sub_n (rp, up, vp, n);
4683 co += mpn_sub_1 (rp, rp, n, ci);
4684 return co;
4686 #endif
4688 #if TUNE_PROGRAM_BUILD
4689 /* Some extras wanted when recompiling some .c files for use by the tune
4690 program. Not part of a normal build.
4692 It's necessary to keep these thresholds as #defines (just to an
4693 identically named variable), since various defaults are established based
4694 on #ifdef in the .c files. For some this is not so (the defaults are
4695 instead established above), but all are done this way for consistency. */
4697 #undef MUL_TOOM22_THRESHOLD
4698 #define MUL_TOOM22_THRESHOLD mul_toom22_threshold
4699 extern mp_size_t mul_toom22_threshold;
4701 #undef MUL_TOOM33_THRESHOLD
4702 #define MUL_TOOM33_THRESHOLD mul_toom33_threshold
4703 extern mp_size_t mul_toom33_threshold;
4705 #undef MUL_TOOM44_THRESHOLD
4706 #define MUL_TOOM44_THRESHOLD mul_toom44_threshold
4707 extern mp_size_t mul_toom44_threshold;
4709 #undef MUL_TOOM6H_THRESHOLD
4710 #define MUL_TOOM6H_THRESHOLD mul_toom6h_threshold
4711 extern mp_size_t mul_toom6h_threshold;
4713 #undef MUL_TOOM8H_THRESHOLD
4714 #define MUL_TOOM8H_THRESHOLD mul_toom8h_threshold
4715 extern mp_size_t mul_toom8h_threshold;
4717 #undef MUL_TOOM32_TO_TOOM43_THRESHOLD
4718 #define MUL_TOOM32_TO_TOOM43_THRESHOLD mul_toom32_to_toom43_threshold
4719 extern mp_size_t mul_toom32_to_toom43_threshold;
4721 #undef MUL_TOOM32_TO_TOOM53_THRESHOLD
4722 #define MUL_TOOM32_TO_TOOM53_THRESHOLD mul_toom32_to_toom53_threshold
4723 extern mp_size_t mul_toom32_to_toom53_threshold;
4725 #undef MUL_TOOM42_TO_TOOM53_THRESHOLD
4726 #define MUL_TOOM42_TO_TOOM53_THRESHOLD mul_toom42_to_toom53_threshold
4727 extern mp_size_t mul_toom42_to_toom53_threshold;
4729 #undef MUL_TOOM42_TO_TOOM63_THRESHOLD
4730 #define MUL_TOOM42_TO_TOOM63_THRESHOLD mul_toom42_to_toom63_threshold
4731 extern mp_size_t mul_toom42_to_toom63_threshold;
4733 #undef MUL_TOOM43_TO_TOOM54_THRESHOLD
4734 #define MUL_TOOM43_TO_TOOM54_THRESHOLD mul_toom43_to_toom54_threshold
4735 extern mp_size_t mul_toom43_to_toom54_threshold;
4737 #undef MUL_FFT_THRESHOLD
4738 #define MUL_FFT_THRESHOLD mul_fft_threshold
4739 extern mp_size_t mul_fft_threshold;
4741 #undef MUL_FFT_MODF_THRESHOLD
4742 #define MUL_FFT_MODF_THRESHOLD mul_fft_modf_threshold
4743 extern mp_size_t mul_fft_modf_threshold;
4745 #undef MUL_FFT_TABLE
4746 #define MUL_FFT_TABLE { 0 }
4748 #undef MUL_FFT_TABLE3
4749 #define MUL_FFT_TABLE3 { {0,0} }
4751 /* A native mpn_sqr_basecase is not tuned and SQR_BASECASE_THRESHOLD should
4752 remain as zero (always use it). */
4753 #if ! HAVE_NATIVE_mpn_sqr_basecase
4754 #undef SQR_BASECASE_THRESHOLD
4755 #define SQR_BASECASE_THRESHOLD sqr_basecase_threshold
4756 extern mp_size_t sqr_basecase_threshold;
4757 #endif
4759 #if TUNE_PROGRAM_BUILD_SQR
4760 #undef SQR_TOOM2_THRESHOLD
4761 #define SQR_TOOM2_THRESHOLD SQR_TOOM2_MAX_GENERIC
4762 #else
4763 #undef SQR_TOOM2_THRESHOLD
4764 #define SQR_TOOM2_THRESHOLD sqr_toom2_threshold
4765 extern mp_size_t sqr_toom2_threshold;
4766 #endif
4768 #undef SQR_TOOM3_THRESHOLD
4769 #define SQR_TOOM3_THRESHOLD sqr_toom3_threshold
4770 extern mp_size_t sqr_toom3_threshold;
4772 #undef SQR_TOOM4_THRESHOLD
4773 #define SQR_TOOM4_THRESHOLD sqr_toom4_threshold
4774 extern mp_size_t sqr_toom4_threshold;
4776 #undef SQR_TOOM6_THRESHOLD
4777 #define SQR_TOOM6_THRESHOLD sqr_toom6_threshold
4778 extern mp_size_t sqr_toom6_threshold;
4780 #undef SQR_TOOM8_THRESHOLD
4781 #define SQR_TOOM8_THRESHOLD sqr_toom8_threshold
4782 extern mp_size_t sqr_toom8_threshold;
4784 #undef SQR_FFT_THRESHOLD
4785 #define SQR_FFT_THRESHOLD sqr_fft_threshold
4786 extern mp_size_t sqr_fft_threshold;
4788 #undef SQR_FFT_MODF_THRESHOLD
4789 #define SQR_FFT_MODF_THRESHOLD sqr_fft_modf_threshold
4790 extern mp_size_t sqr_fft_modf_threshold;
4792 #undef SQR_FFT_TABLE
4793 #define SQR_FFT_TABLE { 0 }
4795 #undef SQR_FFT_TABLE3
4796 #define SQR_FFT_TABLE3 { {0,0} }
4798 #undef MULLO_BASECASE_THRESHOLD
4799 #define MULLO_BASECASE_THRESHOLD mullo_basecase_threshold
4800 extern mp_size_t mullo_basecase_threshold;
4802 #undef MULLO_DC_THRESHOLD
4803 #define MULLO_DC_THRESHOLD mullo_dc_threshold
4804 extern mp_size_t mullo_dc_threshold;
4806 #undef MULLO_MUL_N_THRESHOLD
4807 #define MULLO_MUL_N_THRESHOLD mullo_mul_n_threshold
4808 extern mp_size_t mullo_mul_n_threshold;
4810 #undef SQRLO_BASECASE_THRESHOLD
4811 #define SQRLO_BASECASE_THRESHOLD sqrlo_basecase_threshold
4812 extern mp_size_t sqrlo_basecase_threshold;
4814 #undef SQRLO_DC_THRESHOLD
4815 #define SQRLO_DC_THRESHOLD sqrlo_dc_threshold
4816 extern mp_size_t sqrlo_dc_threshold;
4818 #undef SQRLO_SQR_THRESHOLD
4819 #define SQRLO_SQR_THRESHOLD sqrlo_sqr_threshold
4820 extern mp_size_t sqrlo_sqr_threshold;
4822 #undef MULMID_TOOM42_THRESHOLD
4823 #define MULMID_TOOM42_THRESHOLD mulmid_toom42_threshold
4824 extern mp_size_t mulmid_toom42_threshold;
4826 #undef DIV_QR_2_PI2_THRESHOLD
4827 #define DIV_QR_2_PI2_THRESHOLD div_qr_2_pi2_threshold
4828 extern mp_size_t div_qr_2_pi2_threshold;
4830 #undef DC_DIV_QR_THRESHOLD
4831 #define DC_DIV_QR_THRESHOLD dc_div_qr_threshold
4832 extern mp_size_t dc_div_qr_threshold;
4834 #undef DC_DIVAPPR_Q_THRESHOLD
4835 #define DC_DIVAPPR_Q_THRESHOLD dc_divappr_q_threshold
4836 extern mp_size_t dc_divappr_q_threshold;
4838 #undef DC_BDIV_Q_THRESHOLD
4839 #define DC_BDIV_Q_THRESHOLD dc_bdiv_q_threshold
4840 extern mp_size_t dc_bdiv_q_threshold;
4842 #undef DC_BDIV_QR_THRESHOLD
4843 #define DC_BDIV_QR_THRESHOLD dc_bdiv_qr_threshold
4844 extern mp_size_t dc_bdiv_qr_threshold;
4846 #undef MU_DIV_QR_THRESHOLD
4847 #define MU_DIV_QR_THRESHOLD mu_div_qr_threshold
4848 extern mp_size_t mu_div_qr_threshold;
4850 #undef MU_DIVAPPR_Q_THRESHOLD
4851 #define MU_DIVAPPR_Q_THRESHOLD mu_divappr_q_threshold
4852 extern mp_size_t mu_divappr_q_threshold;
4854 #undef MUPI_DIV_QR_THRESHOLD
4855 #define MUPI_DIV_QR_THRESHOLD mupi_div_qr_threshold
4856 extern mp_size_t mupi_div_qr_threshold;
4858 #undef MU_BDIV_QR_THRESHOLD
4859 #define MU_BDIV_QR_THRESHOLD mu_bdiv_qr_threshold
4860 extern mp_size_t mu_bdiv_qr_threshold;
4862 #undef MU_BDIV_Q_THRESHOLD
4863 #define MU_BDIV_Q_THRESHOLD mu_bdiv_q_threshold
4864 extern mp_size_t mu_bdiv_q_threshold;
4866 #undef INV_MULMOD_BNM1_THRESHOLD
4867 #define INV_MULMOD_BNM1_THRESHOLD inv_mulmod_bnm1_threshold
4868 extern mp_size_t inv_mulmod_bnm1_threshold;
4870 #undef INV_NEWTON_THRESHOLD
4871 #define INV_NEWTON_THRESHOLD inv_newton_threshold
4872 extern mp_size_t inv_newton_threshold;
4874 #undef INV_APPR_THRESHOLD
4875 #define INV_APPR_THRESHOLD inv_appr_threshold
4876 extern mp_size_t inv_appr_threshold;
4878 #undef BINV_NEWTON_THRESHOLD
4879 #define BINV_NEWTON_THRESHOLD binv_newton_threshold
4880 extern mp_size_t binv_newton_threshold;
4882 #undef REDC_1_TO_REDC_2_THRESHOLD
4883 #define REDC_1_TO_REDC_2_THRESHOLD redc_1_to_redc_2_threshold
4884 extern mp_size_t redc_1_to_redc_2_threshold;
4886 #undef REDC_2_TO_REDC_N_THRESHOLD
4887 #define REDC_2_TO_REDC_N_THRESHOLD redc_2_to_redc_n_threshold
4888 extern mp_size_t redc_2_to_redc_n_threshold;
4890 #undef REDC_1_TO_REDC_N_THRESHOLD
4891 #define REDC_1_TO_REDC_N_THRESHOLD redc_1_to_redc_n_threshold
4892 extern mp_size_t redc_1_to_redc_n_threshold;
4894 #undef MATRIX22_STRASSEN_THRESHOLD
4895 #define MATRIX22_STRASSEN_THRESHOLD matrix22_strassen_threshold
4896 extern mp_size_t matrix22_strassen_threshold;
4898 #undef HGCD_THRESHOLD
4899 #define HGCD_THRESHOLD hgcd_threshold
4900 extern mp_size_t hgcd_threshold;
4902 #undef HGCD_APPR_THRESHOLD
4903 #define HGCD_APPR_THRESHOLD hgcd_appr_threshold
4904 extern mp_size_t hgcd_appr_threshold;
4906 #undef HGCD_REDUCE_THRESHOLD
4907 #define HGCD_REDUCE_THRESHOLD hgcd_reduce_threshold
4908 extern mp_size_t hgcd_reduce_threshold;
4910 #undef GCD_DC_THRESHOLD
4911 #define GCD_DC_THRESHOLD gcd_dc_threshold
4912 extern mp_size_t gcd_dc_threshold;
4914 #undef GCDEXT_DC_THRESHOLD
4915 #define GCDEXT_DC_THRESHOLD gcdext_dc_threshold
4916 extern mp_size_t gcdext_dc_threshold;
4918 #undef DIV_QR_1N_PI1_METHOD
4919 #define DIV_QR_1N_PI1_METHOD div_qr_1n_pi1_method
4920 extern int div_qr_1n_pi1_method;
4922 #undef DIV_QR_1_NORM_THRESHOLD
4923 #define DIV_QR_1_NORM_THRESHOLD div_qr_1_norm_threshold
4924 extern mp_size_t div_qr_1_norm_threshold;
4926 #undef DIV_QR_1_UNNORM_THRESHOLD
4927 #define DIV_QR_1_UNNORM_THRESHOLD div_qr_1_unnorm_threshold
4928 extern mp_size_t div_qr_1_unnorm_threshold;
4930 #undef DIVREM_1_NORM_THRESHOLD
4931 #define DIVREM_1_NORM_THRESHOLD divrem_1_norm_threshold
4932 extern mp_size_t divrem_1_norm_threshold;
4934 #undef DIVREM_1_UNNORM_THRESHOLD
4935 #define DIVREM_1_UNNORM_THRESHOLD divrem_1_unnorm_threshold
4936 extern mp_size_t divrem_1_unnorm_threshold;
4938 #undef MOD_1_NORM_THRESHOLD
4939 #define MOD_1_NORM_THRESHOLD mod_1_norm_threshold
4940 extern mp_size_t mod_1_norm_threshold;
4942 #undef MOD_1_UNNORM_THRESHOLD
4943 #define MOD_1_UNNORM_THRESHOLD mod_1_unnorm_threshold
4944 extern mp_size_t mod_1_unnorm_threshold;
4946 #undef MOD_1_1P_METHOD
4947 #define MOD_1_1P_METHOD mod_1_1p_method
4948 extern int mod_1_1p_method;
4950 #undef MOD_1N_TO_MOD_1_1_THRESHOLD
4951 #define MOD_1N_TO_MOD_1_1_THRESHOLD mod_1n_to_mod_1_1_threshold
4952 extern mp_size_t mod_1n_to_mod_1_1_threshold;
4954 #undef MOD_1U_TO_MOD_1_1_THRESHOLD
4955 #define MOD_1U_TO_MOD_1_1_THRESHOLD mod_1u_to_mod_1_1_threshold
4956 extern mp_size_t mod_1u_to_mod_1_1_threshold;
4958 #undef MOD_1_1_TO_MOD_1_2_THRESHOLD
4959 #define MOD_1_1_TO_MOD_1_2_THRESHOLD mod_1_1_to_mod_1_2_threshold
4960 extern mp_size_t mod_1_1_to_mod_1_2_threshold;
4962 #undef MOD_1_2_TO_MOD_1_4_THRESHOLD
4963 #define MOD_1_2_TO_MOD_1_4_THRESHOLD mod_1_2_to_mod_1_4_threshold
4964 extern mp_size_t mod_1_2_to_mod_1_4_threshold;
4966 #undef PREINV_MOD_1_TO_MOD_1_THRESHOLD
4967 #define PREINV_MOD_1_TO_MOD_1_THRESHOLD preinv_mod_1_to_mod_1_threshold
4968 extern mp_size_t preinv_mod_1_to_mod_1_threshold;
4970 #if ! UDIV_PREINV_ALWAYS
4971 #undef DIVREM_2_THRESHOLD
4972 #define DIVREM_2_THRESHOLD divrem_2_threshold
4973 extern mp_size_t divrem_2_threshold;
4974 #endif
4976 #undef MULMOD_BNM1_THRESHOLD
4977 #define MULMOD_BNM1_THRESHOLD mulmod_bnm1_threshold
4978 extern mp_size_t mulmod_bnm1_threshold;
4980 #undef SQRMOD_BNM1_THRESHOLD
4981 #define SQRMOD_BNM1_THRESHOLD sqrmod_bnm1_threshold
4982 extern mp_size_t sqrmod_bnm1_threshold;
4984 #undef GET_STR_DC_THRESHOLD
4985 #define GET_STR_DC_THRESHOLD get_str_dc_threshold
4986 extern mp_size_t get_str_dc_threshold;
4988 #undef GET_STR_PRECOMPUTE_THRESHOLD
4989 #define GET_STR_PRECOMPUTE_THRESHOLD get_str_precompute_threshold
4990 extern mp_size_t get_str_precompute_threshold;
4992 #undef SET_STR_DC_THRESHOLD
4993 #define SET_STR_DC_THRESHOLD set_str_dc_threshold
4994 extern mp_size_t set_str_dc_threshold;
4996 #undef SET_STR_PRECOMPUTE_THRESHOLD
4997 #define SET_STR_PRECOMPUTE_THRESHOLD set_str_precompute_threshold
4998 extern mp_size_t set_str_precompute_threshold;
5000 #undef FAC_ODD_THRESHOLD
5001 #define FAC_ODD_THRESHOLD fac_odd_threshold
5002 extern mp_size_t fac_odd_threshold;
5004 #undef FAC_DSC_THRESHOLD
5005 #define FAC_DSC_THRESHOLD fac_dsc_threshold
5006 extern mp_size_t fac_dsc_threshold;
5008 #undef FFT_TABLE_ATTRS
5009 #define FFT_TABLE_ATTRS
5010 extern mp_size_t mpn_fft_table[2][MPN_FFT_TABLE_SIZE];
5011 #define FFT_TABLE3_SIZE 2000 /* generous space for tuning */
5012 extern struct fft_table_nk mpn_fft_table3[2][FFT_TABLE3_SIZE];
5014 /* Sizes the tune program tests up to, used in a couple of recompilations. */
5015 #undef MUL_TOOM22_THRESHOLD_LIMIT
5016 #undef MUL_TOOM33_THRESHOLD_LIMIT
5017 #undef MULLO_BASECASE_THRESHOLD_LIMIT
5018 #undef SQRLO_BASECASE_THRESHOLD_LIMIT
5019 #undef SQRLO_DC_THRESHOLD_LIMIT
5020 #undef SQR_TOOM3_THRESHOLD_LIMIT
5021 #define SQR_TOOM2_MAX_GENERIC 200
5022 #define MUL_TOOM22_THRESHOLD_LIMIT 700
5023 #define MUL_TOOM33_THRESHOLD_LIMIT 700
5024 #define SQR_TOOM3_THRESHOLD_LIMIT 400
5025 #define MUL_TOOM44_THRESHOLD_LIMIT 1000
5026 #define SQR_TOOM4_THRESHOLD_LIMIT 1000
5027 #define MUL_TOOM6H_THRESHOLD_LIMIT 1100
5028 #define SQR_TOOM6_THRESHOLD_LIMIT 1100
5029 #define MUL_TOOM8H_THRESHOLD_LIMIT 1200
5030 #define SQR_TOOM8_THRESHOLD_LIMIT 1200
5031 #define MULLO_BASECASE_THRESHOLD_LIMIT 200
5032 #define SQRLO_BASECASE_THRESHOLD_LIMIT 200
5033 #define SQRLO_DC_THRESHOLD_LIMIT 400
5034 #define GET_STR_THRESHOLD_LIMIT 150
5035 #define FAC_DSC_THRESHOLD_LIMIT 2048
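/* Illustration (sketch, using only names declared above): the point of the
   #undef/#define/extern triplets in this TUNE_PROGRAM_BUILD section is that
   a threshold test in mpn code then reads a tunable global instead of a
   compile-time constant, so the tune program can vary it between timing
   runs.  The *_LIMIT values just above bound the operand sizes it tries.  */
#if 0
static int
uses_basecase_mul (mp_size_t n)   /* hypothetical caller, for illustration */
{
  /* Expands to "n < mul_toom22_threshold" in a tune build.  */
  return n < MUL_TOOM22_THRESHOLD;
}
#endif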
5037 #endif /* TUNE_PROGRAM_BUILD */
#if defined (__cplusplus)
}
#endif
5043 /* FIXME: Make these itch functions less conservative. Also consider making
5044 them dependent on just 'an', and compute the allocation directly from 'an'
5045 instead of via n. */
/* toom22/toom2: Scratch need is 2*(an + k), k is the recursion depth.
   k is the smallest k such that
     ceil(an/2^k) < MUL_TOOM22_THRESHOLD,
   which implies that
     k = bitsize of floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))
       = 1 + floor (log_2 (floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))))
*/
5054 #define mpn_toom22_mul_itch(an, bn) \
5055 (2 * ((an) + GMP_NUMB_BITS))
5056 #define mpn_toom2_sqr_itch(an) \
5057 (2 * ((an) + GMP_NUMB_BITS))
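/* Worked example of the bound above (illustrative numbers only; the real
   MUL_TOOM22_THRESHOLD is CPU dependent): for an = 1000 and a threshold of
   30,
     k = 1 + floor (log_2 (floor (999/29))) = 1 + floor (log_2 (34)) = 6,
   so the exact need is 2*(1000 + 6) = 2012 limbs, while the macro reserves
   2*(1000 + 64) = 2128 limbs with 64-bit limbs.  The macro is safe because
   the recursion depth k cannot exceed GMP_NUMB_BITS for any representable
   an.  */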
/* toom33/toom3: Scratch need is 5an/2 + 10k, k is the recursion depth.
   We use 3an + C, so that we can use a smaller constant.
*/
5062 #define mpn_toom33_mul_itch(an, bn) \
5063 (3 * (an) + GMP_NUMB_BITS)
5064 #define mpn_toom3_sqr_itch(an) \
5065 (3 * (an) + GMP_NUMB_BITS)
/* toom44/toom4: Scratch need is 8an/3 + 13k, k is the recursion depth.
   We use 3an + C, so that we can use a smaller constant.
*/
5070 #define mpn_toom44_mul_itch(an, bn) \
5071 (3 * (an) + GMP_NUMB_BITS)
5072 #define mpn_toom4_sqr_itch(an) \
5073 (3 * (an) + GMP_NUMB_BITS)
5075 #define mpn_toom6_sqr_itch(n) \
5076 (((n) - SQR_TOOM6_THRESHOLD)*2 + \
5077 MAX(SQR_TOOM6_THRESHOLD*2 + GMP_NUMB_BITS*6, \
5078 mpn_toom4_sqr_itch(SQR_TOOM6_THRESHOLD)))
5080 #define MUL_TOOM6H_MIN \
5081 ((MUL_TOOM6H_THRESHOLD > MUL_TOOM44_THRESHOLD) ? \
5082 MUL_TOOM6H_THRESHOLD : MUL_TOOM44_THRESHOLD)
5083 #define mpn_toom6_mul_n_itch(n) \
5084 (((n) - MUL_TOOM6H_MIN)*2 + \
5085 MAX(MUL_TOOM6H_MIN*2 + GMP_NUMB_BITS*6, \
5086 mpn_toom44_mul_itch(MUL_TOOM6H_MIN,MUL_TOOM6H_MIN)))
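/* The helper below handles the unbalanced toom6h case by collapsing the two
   operand sizes into a single estimated balanced size, (an + bn)/10 + 1
   scaled by 6, and then reusing the balanced itch expression for that size;
   like the other itch functions it is deliberately conservative rather than
   exact (mpn_toom8h_mul_itch further down follows the same scheme with a
   divisor of 14 and a factor of 8).  */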
static inline mp_size_t
mpn_toom6h_mul_itch (mp_size_t an, mp_size_t bn) {
  mp_size_t estimatedN;
  estimatedN = (an + bn) / (size_t) 10 + 1;
  return mpn_toom6_mul_n_itch (estimatedN * 6);
}
5095 #define mpn_toom8_sqr_itch(n) \
5096 ((((n)*15)>>3) - ((SQR_TOOM8_THRESHOLD*15)>>3) + \
5097 MAX(((SQR_TOOM8_THRESHOLD*15)>>3) + GMP_NUMB_BITS*6, \
5098 mpn_toom6_sqr_itch(SQR_TOOM8_THRESHOLD)))
5100 #define MUL_TOOM8H_MIN \
5101 ((MUL_TOOM8H_THRESHOLD > MUL_TOOM6H_MIN) ? \
5102 MUL_TOOM8H_THRESHOLD : MUL_TOOM6H_MIN)
5103 #define mpn_toom8_mul_n_itch(n) \
5104 ((((n)*15)>>3) - ((MUL_TOOM8H_MIN*15)>>3) + \
5105 MAX(((MUL_TOOM8H_MIN*15)>>3) + GMP_NUMB_BITS*6, \
5106 mpn_toom6_mul_n_itch(MUL_TOOM8H_MIN)))
static inline mp_size_t
mpn_toom8h_mul_itch (mp_size_t an, mp_size_t bn) {
  mp_size_t estimatedN;
  estimatedN = (an + bn) / (size_t) 14 + 1;
  return mpn_toom8_mul_n_itch (estimatedN * 8);
}
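/* The mpn_toomXY_mul_itch helpers that follow share one pattern: n is the
   block size max (ceil (an/X), ceil (bn/Y)) for a toomXY splitting of an
   into X blocks and bn into Y blocks, written with 1 + (... - 1)/X style
   integer arithmetic, and the scratch estimate is then a small linear
   function of n.  */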
static inline mp_size_t
mpn_toom32_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (2 * an >= 3 * bn ? (an - 1) / (size_t) 3 : (bn - 1) >> 1);
  mp_size_t itch = 2 * n + 1;

  return itch;
}

static inline mp_size_t
mpn_toom42_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;
  return 6 * n + 3;
}

static inline mp_size_t
mpn_toom43_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (3 * an >= 4 * bn ? (an - 1) >> 2 : (bn - 1) / (size_t) 3);

  return 6*n + 4;
}

static inline mp_size_t
mpn_toom52_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (2 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) >> 1);
  return 6*n + 4;
}

static inline mp_size_t
mpn_toom53_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (3 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) / (size_t) 3);
  return 10 * n + 10;
}

static inline mp_size_t
mpn_toom62_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (an >= 3 * bn ? (an - 1) / (size_t) 6 : (bn - 1) >> 1);
  return 10 * n + 10;
}

static inline mp_size_t
mpn_toom63_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (an >= 2 * bn ? (an - 1) / (size_t) 6 : (bn - 1) / (size_t) 3);
  return 9 * n + 3;
}

static inline mp_size_t
mpn_toom54_mul_itch (mp_size_t an, mp_size_t bn)
{
  mp_size_t n = 1 + (4 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) / (size_t) 4);
  return 9 * n + 3;
}
5174 /* let S(n) = space required for input size n,
5175 then S(n) = 3 floor(n/2) + 1 + S(floor(n/2)). */
5176 #define mpn_toom42_mulmid_itch(n) \
5177 (3 * (n) + GMP_NUMB_BITS)
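/* Unrolling that recurrence (ignoring the small base-case term):
     S(n) = 3 floor(n/2) + 1 + S(floor(n/2))
         <= 3 (n/2 + n/4 + n/8 + ...) + d
         <  3 n + d,
   where the recursion depth d is at most log_2(n) <= GMP_NUMB_BITS, so the
   3*(n) + GMP_NUMB_BITS reservation above is an upper bound.  */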
5179 #if 0
5180 #define mpn_fft_mul mpn_mul_fft_full
5181 #else
5182 #define mpn_fft_mul mpn_nussbaumer_mul
5183 #endif
5185 #ifdef __cplusplus
5187 /* A little helper for a null-terminated __gmp_allocate_func string.
5188 The destructor ensures it's freed even if an exception is thrown.
5189 The len field is needed by the destructor, and can be used by anyone else
5190 to avoid a second strlen pass over the data.
5192 Since our input is a C string, using strlen is correct. Perhaps it'd be
5193 more C++-ish style to use std::char_traits<char>::length, but char_traits
5194 isn't available in gcc 2.95.4. */
class gmp_allocated_string {
 public:
  char *str;
  size_t len;
  gmp_allocated_string(char *arg)
  {
    str = arg;
    len = std::strlen (str);
  }
  ~gmp_allocated_string()
  {
    (*__gmp_free_func) (str, len+1);
  }
};
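/* Usage sketch (illustrative only; example_print is not part of libgmp):
   any string obtained from __gmp_allocate_func, such as the result of
   mpz_get_str (NULL, base, z), can be wrapped so it is released through
   __gmp_free_func even if the stream operation throws.  */
#if 0
std::ostream &
example_print (std::ostream &o, mpz_srcptr z)
{
  gmp_allocated_string t (mpz_get_str (NULL, 10, z));  /* block of len+1 bytes */
  return o.write (t.str, t.len);    /* freed by ~gmp_allocated_string */
}
#endif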
5211 std::istream &__gmpz_operator_in_nowhite (std::istream &, mpz_ptr, char);
5212 int __gmp_istream_set_base (std::istream &, char &, bool &, bool &);
5213 void __gmp_istream_set_digits (std::string &, std::istream &, char &, bool &, int);
5214 void __gmp_doprnt_params_from_ios (struct doprnt_params_t *, std::ios &);
5215 std::ostream& __gmp_doprnt_integer_ostream (std::ostream &, struct doprnt_params_t *, char *);
5216 extern const struct doprnt_funs_t __gmp_asprintf_funs_noformat;
5218 #endif /* __cplusplus */
5220 #endif /* __GMP_IMPL_H__ */