[official-gcc.git] / gcc / wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "wide-int.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "dumpfile.h"
39 #define HOST_BITS_PER_HALF_WIDE_INT 32
40 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
41 # define HOST_HALF_WIDE_INT long
42 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
43 # define HOST_HALF_WIDE_INT int
44 #else
45 #error Please add support for HOST_HALF_WIDE_INT
46 #endif
48 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
49 /* Do not include longlong.h when the compiler is clang-based. See PR61146. */
50 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
51 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
52 typedef unsigned HOST_WIDE_INT UWtype;
53 typedef unsigned int UQItype __attribute__ ((mode (QI)));
54 typedef unsigned int USItype __attribute__ ((mode (SI)));
55 typedef unsigned int UDItype __attribute__ ((mode (DI)));
56 #if W_TYPE_SIZE == 32
57 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
58 #else
59 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
60 #endif
61 #include "longlong.h"
62 #endif
64 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
67 * Internal utilities.
70 /* Quantities to deal with values that hold half of a wide int. Used
71 in multiply and divide. */
72 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
74 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
75 #define BLOCKS_NEEDED(PREC) \
76 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
77 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
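/* Illustrative note added for this edit, not part of the original source:
   with a 64-bit HOST_WIDE_INT, BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2
   and BLOCKS_NEEDED (0) == 1, while SIGN_MASK (X) is -1 when X is negative and
   0 otherwise, i.e. a whole block filled with copies of X's sign bit.  */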
79 /* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
80 based on the top existing bit of VAL. */
82 static unsigned HOST_WIDE_INT
83 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
85 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
88 /* Convert the integer in VAL to canonical form, returning its new length.
89 LEN is the number of blocks currently in VAL and PRECISION is the number
90 of bits in the integer it represents.
92 This function only changes the representation, not the value. */
93 static unsigned int
94 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
96 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
97 HOST_WIDE_INT top;
98 int i;
100 if (len > blocks_needed)
101 len = blocks_needed;
103 if (len == 1)
104 return len;
106 top = val[len - 1];
107 if (len * HOST_BITS_PER_WIDE_INT > precision)
108 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
109 if (top != 0 && top != (HOST_WIDE_INT)-1)
110 return len;
112 /* At this point we know that the top is either 0 or -1. Find the
113 first block that is not a copy of this. */
114 for (i = len - 2; i >= 0; i--)
116 HOST_WIDE_INT x = val[i];
117 if (x != top)
119 if (SIGN_MASK (x) == top)
120 return i + 1;
122 /* We need an extra block because the top bit of block I does
123 not match the extension. */
124 return i + 2;
128 /* The number is 0 or -1. */
129 return 1;
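/* Worked example (editorial sketch, assuming 64-bit HOST_WIDE_INT blocks):
   canonizing the two-block array { 5, 0 } at precision 128 returns length 1,
   because the zero high block is exactly the sign extension of block 0
   (SIGN_MASK (5) == 0).  Canonizing { -5, 0 } keeps both blocks, since the
   negative low block on its own would imply a -1 high block rather than 0.  */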
133 * Conversion routines in and out of wide_int.
136 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
137 result for an integer with precision PRECISION. Return the length
138 of VAL (after any canonization). */
139 unsigned int
140 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
141 unsigned int xlen, unsigned int precision, bool need_canon)
143 for (unsigned i = 0; i < xlen; i++)
144 val[i] = xval[i];
145 return need_canon ? canonize (val, xlen, precision) : xlen;
148 /* Construct a wide int from a buffer of length LEN. BUFFER will be
149 read according to the byte endianness and word endianness of the target.
150 Only the lower BUFFER_LEN bytes of the result are set; the remaining
151 high bytes are cleared. */
152 wide_int
153 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
155 unsigned int precision = buffer_len * BITS_PER_UNIT;
156 wide_int result = wide_int::create (precision);
157 unsigned int words = buffer_len / UNITS_PER_WORD;
159 /* We have to clear all the bits ourselves, as we merely or in values
160 below. */
161 unsigned int len = BLOCKS_NEEDED (precision);
162 HOST_WIDE_INT *val = result.write_val ();
163 for (unsigned int i = 0; i < len; ++i)
164 val[i] = 0;
166 for (unsigned int byte = 0; byte < buffer_len; byte++)
168 unsigned int offset;
169 unsigned int index;
170 unsigned int bitpos = byte * BITS_PER_UNIT;
171 unsigned HOST_WIDE_INT value;
173 if (buffer_len > UNITS_PER_WORD)
175 unsigned int word = byte / UNITS_PER_WORD;
177 if (WORDS_BIG_ENDIAN)
178 word = (words - 1) - word;
180 offset = word * UNITS_PER_WORD;
182 if (BYTES_BIG_ENDIAN)
183 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
184 else
185 offset += byte % UNITS_PER_WORD;
187 else
188 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
190 value = (unsigned HOST_WIDE_INT) buffer[offset];
192 index = bitpos / HOST_BITS_PER_WIDE_INT;
193 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
196 result.set_len (canonize (val, len, precision));
198 return result;
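/* Editorial usage sketch, not part of the original source; it assumes a
   little-endian target and shows only the intended call pattern:

     unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
     wide_int w = wi::from_buffer (buf, 4);   // 32-bit value 0x12345678

   The result's precision is buffer_len * BITS_PER_UNIT, so only those low
   bits are defined.  */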
201 /* Set RESULT from X; the sign is taken according to SGN. */
202 void
203 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
205 int len = x.get_len ();
206 const HOST_WIDE_INT *v = x.get_val ();
207 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
209 if (wi::neg_p (x, sgn))
211 /* We use ones' complement to avoid the -0x80..0 edge case that
212 negation won't work on. */
213 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
214 for (int i = 0; i < len; i++)
215 t[i] = ~v[i];
216 if (excess > 0)
217 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
218 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
219 mpz_com (result, result);
221 else if (excess > 0)
223 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
224 for (int i = 0; i < len - 1; i++)
225 t[i] = v[i];
226 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
227 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
229 else
230 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
233 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
234 values of VAL will be wrapped; otherwise, they will be set to the
235 appropriate minimum or maximum TYPE bound. */
236 wide_int
237 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
239 size_t count, numb;
240 unsigned int prec = TYPE_PRECISION (type);
241 wide_int res = wide_int::create (prec);
243 if (!wrap)
245 mpz_t min, max;
247 mpz_init (min);
248 mpz_init (max);
249 get_type_static_bounds (type, min, max);
251 if (mpz_cmp (x, min) < 0)
252 mpz_set (x, min);
253 else if (mpz_cmp (x, max) > 0)
254 mpz_set (x, max);
256 mpz_clear (min);
257 mpz_clear (max);
260 /* Determine the number of unsigned HOST_WIDE_INTs that are required
261 for representing the value. The code to calculate count is
262 extracted from the GMP manual, section "Integer Import and Export":
263 http://gmplib.org/manual/Integer-Import-and-Export.html */
264 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
265 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
266 HOST_WIDE_INT *val = res.write_val ();
267 /* Write directly to the wide_int storage if possible, otherwise leave
268 GMP to allocate the memory for us. It might be slightly more efficient
269 to use mpz_tdiv_r_2exp for the latter case, but the situation is
270 pathological and it seems safer to operate on the original mpz value
271 in all cases. */
272 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
273 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
274 if (count < 1)
276 val[0] = 0;
277 count = 1;
279 count = MIN (count, BLOCKS_NEEDED (prec));
280 if (valres != val)
282 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
283 free (valres);
285 res.set_len (canonize (val, count, prec));
287 if (mpz_sgn (x) < 0)
288 res = -res;
290 return res;
294 * Largest and smallest values in a mode.
297 /* Return the largest SGNed number that is representable in PRECISION bits.
299 TODO: There is still code from the double_int era that tries to
300 make up for the fact that double ints could not represent the
301 min and max values of all types. This code should be removed
302 because the min and max values can always be represented in
303 wide_ints and int-csts. */
304 wide_int
305 wi::max_value (unsigned int precision, signop sgn)
307 gcc_checking_assert (precision != 0);
308 if (sgn == UNSIGNED)
309 /* The unsigned max is just all ones. */
310 return shwi (-1, precision);
311 else
312 /* The signed max is all ones except the top bit. This must be
313 explicitly represented. */
314 return mask (precision - 1, false, precision);
317 /* Return the smallest SGNed number that is representable in PRECISION bits. */
318 wide_int
319 wi::min_value (unsigned int precision, signop sgn)
321 gcc_checking_assert (precision != 0);
322 if (sgn == UNSIGNED)
323 return uhwi (0, precision);
324 else
325 /* The signed min is all zeros except the top bit. This must be
326 explicitly represented. */
327 return wi::set_bit_in_zero (precision - 1, precision);
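/* Editorial example (illustrative only): at precision 8, wi::max_value is
   0xff for UNSIGNED (all ones) and 0x7f for SIGNED (mask (7, false, 8)),
   while wi::min_value is 0 for UNSIGNED and -0x80 for SIGNED (only bit 7
   set via set_bit_in_zero).  */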
331 * Public utilities.
334 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
335 signedness SGN, to an integer that has PRECISION bits. Store the blocks
336 in VAL and return the number of blocks used.
338 This function can handle both extension (PRECISION > XPRECISION)
339 and truncation (PRECISION < XPRECISION). */
340 unsigned int
341 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
342 unsigned int xlen, unsigned int xprecision,
343 unsigned int precision, signop sgn)
345 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
346 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
347 for (unsigned i = 0; i < len; i++)
348 val[i] = xval[i];
350 if (precision > xprecision)
352 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
354 /* Expanding. */
355 if (sgn == UNSIGNED)
357 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
358 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
359 else if (val[len - 1] < 0)
361 while (len < BLOCKS_NEEDED (xprecision))
362 val[len++] = -1;
363 if (small_xprecision)
364 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
365 else
366 val[len++] = 0;
369 else
371 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
372 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
375 len = canonize (val, len, precision);
377 return len;
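/* Editorial example (illustrative, 64-bit blocks): forcing the 8-bit value
   0xff, stored canonically as the single block -1, up to 32 bits gives the
   block 0xff when SGN is UNSIGNED (the stored bits above bit 7 are cleared
   by zext_hwi) but stays -1 when SGN is SIGNED.  Truncation simply drops
   blocks beyond the target precision and re-canonizes.  */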
380 /* This function hides the fact that we cannot rely on the bits beyond
381 the precision. This issue comes up in the relational comparisons
382 where we do allow comparisons of values of different precisions. */
383 static inline HOST_WIDE_INT
384 selt (const HOST_WIDE_INT *a, unsigned int len,
385 unsigned int blocks_needed, unsigned int small_prec,
386 unsigned int index, signop sgn)
388 HOST_WIDE_INT val;
389 if (index < len)
390 val = a[index];
391 else if (index < blocks_needed || sgn == SIGNED)
392 /* Signed or within the precision. */
393 val = SIGN_MASK (a[len - 1]);
394 else
395 /* Unsigned extension beyond the precision. */
396 val = 0;
398 if (small_prec && index == blocks_needed - 1)
399 return (sgn == SIGNED
400 ? sext_hwi (val, small_prec)
401 : zext_hwi (val, small_prec));
402 else
403 return val;
406 /* Find the highest bit represented in a wide int. This will in
407 general have the same value as the sign bit. */
408 static inline HOST_WIDE_INT
409 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
411 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
412 unsigned HOST_WIDE_INT val = a[len - 1];
413 if (excess > 0)
414 val <<= excess;
415 return val >> (HOST_BITS_PER_WIDE_INT - 1);
419 * Comparisons, note that only equality is an operator. The other
420 * comparisons cannot be operators since they are inherently signed or
421 * unsigned and C++ has no such operators.
424 /* Return true if OP0 == OP1. */
425 bool
426 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
427 const HOST_WIDE_INT *op1, unsigned int op1len,
428 unsigned int prec)
430 int l0 = op0len - 1;
431 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
433 if (op0len != op1len)
434 return false;
436 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
438 /* It does not matter if we zext or sext here, we just have to
439 do both the same way. */
440 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
441 return false;
442 l0--;
445 while (l0 >= 0)
446 if (op0[l0] != op1[l0])
447 return false;
448 else
449 l0--;
451 return true;
454 /* Return true if OP0 < OP1 using signed comparisons. */
455 bool
456 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
457 unsigned int precision,
458 const HOST_WIDE_INT *op1, unsigned int op1len)
460 HOST_WIDE_INT s0, s1;
461 unsigned HOST_WIDE_INT u0, u1;
462 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
463 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
464 int l = MAX (op0len - 1, op1len - 1);
466 /* Only the top block is compared as signed. The rest are unsigned
467 comparisons. */
468 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
469 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
470 if (s0 < s1)
471 return true;
472 if (s0 > s1)
473 return false;
475 l--;
476 while (l >= 0)
478 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
479 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
481 if (u0 < u1)
482 return true;
483 if (u0 > u1)
484 return false;
485 l--;
488 return false;
491 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
492 signed compares. */
493 int
494 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
495 unsigned int precision,
496 const HOST_WIDE_INT *op1, unsigned int op1len)
498 HOST_WIDE_INT s0, s1;
499 unsigned HOST_WIDE_INT u0, u1;
500 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
501 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
502 int l = MAX (op0len - 1, op1len - 1);
504 /* Only the top block is compared as signed. The rest are unsigned
505 comparisons. */
506 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
507 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
508 if (s0 < s1)
509 return -1;
510 if (s0 > s1)
511 return 1;
513 l--;
514 while (l >= 0)
516 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
517 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
519 if (u0 < u1)
520 return -1;
521 if (u0 > u1)
522 return 1;
523 l--;
526 return 0;
529 /* Return true if OP0 < OP1 using unsigned comparisons. */
530 bool
531 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
532 unsigned int precision,
533 const HOST_WIDE_INT *op1, unsigned int op1len)
535 unsigned HOST_WIDE_INT x0;
536 unsigned HOST_WIDE_INT x1;
537 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
538 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
539 int l = MAX (op0len - 1, op1len - 1);
541 while (l >= 0)
543 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
544 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
545 if (x0 < x1)
546 return true;
547 if (x0 > x1)
548 return false;
549 l--;
552 return false;
555 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
556 unsigned compares. */
557 int
558 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
559 unsigned int precision,
560 const HOST_WIDE_INT *op1, unsigned int op1len)
562 unsigned HOST_WIDE_INT x0;
563 unsigned HOST_WIDE_INT x1;
564 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
565 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
566 int l = MAX (op0len - 1, op1len - 1);
568 while (l >= 0)
570 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
571 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
572 if (x0 < x1)
573 return -1;
574 if (x0 > x1)
575 return 1;
576 l--;
579 return 0;
583 * Extension.
586 /* Sign-extend the number represented by XVAL and XLEN into VAL,
587 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
588 and VAL have PRECISION bits. */
589 unsigned int
590 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
591 unsigned int xlen, unsigned int precision, unsigned int offset)
593 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
594 /* Extending beyond the precision is a no-op. If we have only stored
595 OFFSET bits or fewer, the rest are already signs. */
596 if (offset >= precision || len >= xlen)
598 for (unsigned i = 0; i < xlen; ++i)
599 val[i] = xval[i];
600 return xlen;
602 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
603 for (unsigned int i = 0; i < len; i++)
604 val[i] = xval[i];
605 if (suboffset > 0)
607 val[len] = sext_hwi (xval[len], suboffset);
608 len += 1;
610 return canonize (val, len, precision);
613 /* Zero-extend the number represented by XVAL and XLEN into VAL,
614 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
615 and VAL have PRECISION bits. */
616 unsigned int
617 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
618 unsigned int xlen, unsigned int precision, unsigned int offset)
620 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
621 /* Extending beyond the precision is a no-op. If we have only stored
622 OFFSET bits or fewer, and the upper stored bit is zero, then there
623 is nothing to do. */
624 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
626 for (unsigned i = 0; i < xlen; ++i)
627 val[i] = xval[i];
628 return xlen;
630 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
631 for (unsigned int i = 0; i < len; i++)
632 val[i] = i < xlen ? xval[i] : -1;
633 if (suboffset > 0)
634 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
635 else
636 val[len] = 0;
637 return canonize (val, len + 1, precision);
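/* Editorial example (illustrative, 64-bit blocks): zero-extending the single
   block -1 from OFFSET == 8 at precision 128 yields the single block 0xff,
   whereas sign-extending from the same offset leaves { -1 } unchanged,
   because everything above bit 7 is already a copy of the sign bit.  */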
641 * Masking, inserting, shifting, rotating.
644 /* Insert WIDTH bits from Y into X starting at START. */
645 wide_int
646 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
647 unsigned int width)
649 wide_int result;
650 wide_int mask;
651 wide_int tmp;
653 unsigned int precision = x.get_precision ();
654 if (start >= precision)
655 return x;
657 gcc_checking_assert (precision >= width);
659 if (start + width >= precision)
660 width = precision - start;
662 mask = wi::shifted_mask (start, width, false, precision);
663 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
664 result = tmp & mask;
666 tmp = wi::bit_and_not (x, mask);
667 result = result | tmp;
669 return result;
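/* Editorial example (illustrative only): inserting the 4-bit value 0xa into
   X == 0 at START == 8 with WIDTH == 4 builds the mask 0xf00, shifts Y to
   0xa00, and ors the two masked pieces together, giving 0xa00.  Any bits of
   Y at or above WIDTH are discarded because the mask clears them.  */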
672 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
673 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
674 bits. */
675 unsigned int
676 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
677 unsigned int xlen, unsigned int precision, unsigned int bit)
679 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
680 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
682 if (block + 1 >= xlen)
684 /* The operation either affects the last current block or needs
685 a new block. */
686 unsigned int len = block + 1;
687 for (unsigned int i = 0; i < len; i++)
688 val[i] = safe_uhwi (xval, xlen, i);
689 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
691 /* If the bit we just set is at the msb of the block, make sure
692 that any higher bits are zeros. */
693 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
694 val[len++] = 0;
695 return len;
697 else
699 for (unsigned int i = 0; i < xlen; i++)
700 val[i] = xval[i];
701 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
702 return canonize (val, xlen, precision);
706 /* bswap THIS. */
707 wide_int
708 wide_int_storage::bswap () const
710 wide_int result = wide_int::create (precision);
711 unsigned int i, s;
712 unsigned int len = BLOCKS_NEEDED (precision);
713 unsigned int xlen = get_len ();
714 const HOST_WIDE_INT *xval = get_val ();
715 HOST_WIDE_INT *val = result.write_val ();
717 /* This is not a well defined operation if the precision is not a
718 multiple of 8. */
719 gcc_assert ((precision & 0x7) == 0);
721 for (i = 0; i < len; i++)
722 val[i] = 0;
724 /* Only swap the bytes that are not the padding. */
725 for (s = 0; s < precision; s += 8)
727 unsigned int d = precision - s - 8;
728 unsigned HOST_WIDE_INT byte;
730 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
731 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
733 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
735 block = d / HOST_BITS_PER_WIDE_INT;
736 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
738 val[block] |= byte << offset;
741 result.set_len (canonize (val, len, precision));
742 return result;
745 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
746 above that up to PREC are zeros. The result is inverted if NEGATE
747 is true. Return the number of blocks in VAL. */
748 unsigned int
749 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
750 unsigned int prec)
752 if (width >= prec)
754 val[0] = negate ? 0 : -1;
755 return 1;
757 else if (width == 0)
759 val[0] = negate ? -1 : 0;
760 return 1;
763 unsigned int i = 0;
764 while (i < width / HOST_BITS_PER_WIDE_INT)
765 val[i++] = negate ? 0 : -1;
767 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
768 if (shift != 0)
770 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
771 val[i++] = negate ? ~last : last;
773 else
774 val[i++] = negate ? -1 : 0;
776 return i;
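/* Editorial example (illustrative, 64-bit blocks): wi::mask with WIDTH == 12,
   NEGATE == false and PREC == 32 writes the single block 0xfff; with
   NEGATE == true it writes ~0xfff, which read back at 32 bits of precision
   is the value 0xfffff000.  */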
779 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
780 bits are ones, and the bits above that up to PREC are zeros. The result
781 is inverted if NEGATE is true. Return the number of blocks in VAL. */
782 unsigned int
783 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
784 bool negate, unsigned int prec)
786 if (start >= prec || width == 0)
788 val[0] = negate ? -1 : 0;
789 return 1;
792 if (width > prec - start)
793 width = prec - start;
794 unsigned int end = start + width;
796 unsigned int i = 0;
797 while (i < start / HOST_BITS_PER_WIDE_INT)
798 val[i++] = negate ? -1 : 0;
800 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
801 if (shift)
803 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
804 shift += width;
805 if (shift < HOST_BITS_PER_WIDE_INT)
807 /* case 000111000 */
808 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
809 val[i++] = negate ? ~block : block;
810 return i;
812 else
813 /* ...111000 */
814 val[i++] = negate ? block : ~block;
817 while (i < end / HOST_BITS_PER_WIDE_INT)
818 /* 1111111 */
819 val[i++] = negate ? 0 : -1;
821 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
822 if (shift != 0)
824 /* 000011111 */
825 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
826 val[i++] = negate ? ~block : block;
828 else if (end < prec)
829 val[i++] = negate ? -1 : 0;
831 return i;
835 * logical operations.
838 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
839 unsigned int
840 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
841 unsigned int op0len, const HOST_WIDE_INT *op1,
842 unsigned int op1len, unsigned int prec)
844 int l0 = op0len - 1;
845 int l1 = op1len - 1;
846 bool need_canon = true;
848 unsigned int len = MAX (op0len, op1len);
849 if (l0 > l1)
851 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
852 if (op1mask == 0)
854 l0 = l1;
855 len = l1 + 1;
857 else
859 need_canon = false;
860 while (l0 > l1)
862 val[l0] = op0[l0];
863 l0--;
867 else if (l1 > l0)
869 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
870 if (op0mask == 0)
871 len = l0 + 1;
872 else
874 need_canon = false;
875 while (l1 > l0)
877 val[l1] = op1[l1];
878 l1--;
883 while (l0 >= 0)
885 val[l0] = op0[l0] & op1[l0];
886 l0--;
889 if (need_canon)
890 len = canonize (val, len, prec);
892 return len;
895 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
896 unsigned int
897 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
898 unsigned int op0len, const HOST_WIDE_INT *op1,
899 unsigned int op1len, unsigned int prec)
901 wide_int result;
902 int l0 = op0len - 1;
903 int l1 = op1len - 1;
904 bool need_canon = true;
906 unsigned int len = MAX (op0len, op1len);
907 if (l0 > l1)
909 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
910 if (op1mask != 0)
912 l0 = l1;
913 len = l1 + 1;
915 else
917 need_canon = false;
918 while (l0 > l1)
920 val[l0] = op0[l0];
921 l0--;
925 else if (l1 > l0)
927 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
928 if (op0mask == 0)
929 len = l0 + 1;
930 else
932 need_canon = false;
933 while (l1 > l0)
935 val[l1] = ~op1[l1];
936 l1--;
941 while (l0 >= 0)
943 val[l0] = op0[l0] & ~op1[l0];
944 l0--;
947 if (need_canon)
948 len = canonize (val, len, prec);
950 return len;
953 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
954 unsigned int
955 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
956 unsigned int op0len, const HOST_WIDE_INT *op1,
957 unsigned int op1len, unsigned int prec)
959 wide_int result;
960 int l0 = op0len - 1;
961 int l1 = op1len - 1;
962 bool need_canon = true;
964 unsigned int len = MAX (op0len, op1len);
965 if (l0 > l1)
967 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
968 if (op1mask != 0)
970 l0 = l1;
971 len = l1 + 1;
973 else
975 need_canon = false;
976 while (l0 > l1)
978 val[l0] = op0[l0];
979 l0--;
983 else if (l1 > l0)
985 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
986 if (op0mask != 0)
987 len = l0 + 1;
988 else
990 need_canon = false;
991 while (l1 > l0)
993 val[l1] = op1[l1];
994 l1--;
999 while (l0 >= 0)
1001 val[l0] = op0[l0] | op1[l0];
1002 l0--;
1005 if (need_canon)
1006 len = canonize (val, len, prec);
1008 return len;
1011 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1012 unsigned int
1013 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1014 unsigned int op0len, const HOST_WIDE_INT *op1,
1015 unsigned int op1len, unsigned int prec)
1017 wide_int result;
1018 int l0 = op0len - 1;
1019 int l1 = op1len - 1;
1020 bool need_canon = true;
1022 unsigned int len = MAX (op0len, op1len);
1023 if (l0 > l1)
1025 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1026 if (op1mask == 0)
1028 l0 = l1;
1029 len = l1 + 1;
1031 else
1033 need_canon = false;
1034 while (l0 > l1)
1036 val[l0] = op0[l0];
1037 l0--;
1041 else if (l1 > l0)
1043 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1044 if (op0mask != 0)
1045 len = l0 + 1;
1046 else
1048 need_canon = false;
1049 while (l1 > l0)
1051 val[l1] = ~op1[l1];
1052 l1--;
1057 while (l0 >= 0)
1059 val[l0] = op0[l0] | ~op1[l0];
1060 l0--;
1063 if (need_canon)
1064 len = canonize (val, len, prec);
1066 return len;
1069 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1070 unsigned int
1071 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1072 unsigned int op0len, const HOST_WIDE_INT *op1,
1073 unsigned int op1len, unsigned int prec)
1075 wide_int result;
1076 int l0 = op0len - 1;
1077 int l1 = op1len - 1;
1079 unsigned int len = MAX (op0len, op1len);
1080 if (l0 > l1)
1082 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1083 while (l0 > l1)
1085 val[l0] = op0[l0] ^ op1mask;
1086 l0--;
1090 if (l1 > l0)
1092 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1093 while (l1 > l0)
1095 val[l1] = op0mask ^ op1[l1];
1096 l1--;
1100 while (l0 >= 0)
1102 val[l0] = op0[l0] ^ op1[l0];
1103 l0--;
1106 return canonize (val, len, prec);
1110 * math
1113 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1114 whether the result overflows when OP0 and OP1 are treated as having
1115 signedness SGN. Return the number of blocks in VAL. */
1116 unsigned int
1117 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1118 unsigned int op0len, const HOST_WIDE_INT *op1,
1119 unsigned int op1len, unsigned int prec,
1120 signop sgn, bool *overflow)
1122 unsigned HOST_WIDE_INT o0 = 0;
1123 unsigned HOST_WIDE_INT o1 = 0;
1124 unsigned HOST_WIDE_INT x = 0;
1125 unsigned HOST_WIDE_INT carry = 0;
1126 unsigned HOST_WIDE_INT old_carry = 0;
1127 unsigned HOST_WIDE_INT mask0, mask1;
1128 unsigned int i;
1130 unsigned int len = MAX (op0len, op1len);
1131 mask0 = -top_bit_of (op0, op0len, prec);
1132 mask1 = -top_bit_of (op1, op1len, prec);
1133 /* Add all of the explicitly defined elements. */
1135 for (i = 0; i < len; i++)
1137 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1138 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1139 x = o0 + o1 + carry;
1140 val[i] = x;
1141 old_carry = carry;
1142 carry = carry == 0 ? x < o0 : x <= o0;
1145 if (len * HOST_BITS_PER_WIDE_INT < prec)
1147 val[len] = mask0 + mask1 + carry;
1148 len++;
1149 if (overflow)
1150 *overflow = false;
1152 else if (overflow)
1154 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1155 if (sgn == SIGNED)
1157 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1158 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1160 else
1162 /* Put the MSBs of X and O0 in the top of the HWI. */
1163 x <<= shift;
1164 o0 <<= shift;
1165 if (old_carry)
1166 *overflow = (x <= o0);
1167 else
1168 *overflow = (x < o0);
1172 return canonize (val, len, prec);
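/* Editorial note (illustrative only): the signed overflow test above is the
   usual "operands agree in sign but the sum does not" rule.  For PREC == 8,
   adding 0x70 and 0x20 gives 0x90; (0x90 ^ 0x70) & (0x90 ^ 0x20) has bit 7
   set, and shifting that bit into the HWI sign position makes the value
   negative, so *OVERFLOW is set.  */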
1175 /* Subroutines of the multiplication and division operations. Unpack
1176 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1177 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1178 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1179 static void
1180 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1181 unsigned int in_len, unsigned int out_len,
1182 unsigned int prec, signop sgn)
1184 unsigned int i;
1185 unsigned int j = 0;
1186 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1187 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1188 HOST_WIDE_INT mask;
1190 if (sgn == SIGNED)
1192 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1193 mask &= HALF_INT_MASK;
1195 else
1196 mask = 0;
1198 for (i = 0; i < blocks_needed - 1; i++)
1200 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1201 result[j++] = x;
1202 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1205 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1206 if (small_prec)
1208 if (sgn == SIGNED)
1209 x = sext_hwi (x, small_prec);
1210 else
1211 x = zext_hwi (x, small_prec);
1213 result[j++] = x;
1214 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1216 /* Smear the sign bit. */
1217 while (j < out_len)
1218 result[j++] = mask;
1221 /* The inverse of wi_unpack. IN_LEN is the number of input
1222 blocks. The number of output blocks will be half this amount. */
1223 static void
1224 wi_pack (unsigned HOST_WIDE_INT *result,
1225 const unsigned HOST_HALF_WIDE_INT *input,
1226 unsigned int in_len)
1228 unsigned int i = 0;
1229 unsigned int j = 0;
1231 while (i + 2 < in_len)
1233 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1234 | ((unsigned HOST_WIDE_INT)input[i + 1]
1235 << HOST_BITS_PER_HALF_WIDE_INT);
1236 i += 2;
1239 /* Handle the case where in_len is odd. For this we zero extend. */
1240 if (in_len & 1)
1241 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1242 else
1243 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1244 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
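/* Editorial note (illustrative, 64-bit HOST_WIDE_INT): wi_unpack followed by
   wi_pack is a round trip.  The block 0x0123456789abcdef unpacks into the
   half-blocks { 0x89abcdef, 0x01234567 } (low half first), and packing those
   two halves reassembles the original block.  */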
1247 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1248 result is returned.
1250 If HIGH is not set, throw away the upper half after the check is
1251 made to see if it overflows. Unfortunately there is no better way
1252 to check for overflow than to do this. If OVERFLOW is nonnull,
1253 record in *OVERFLOW whether the result overflowed. SGN controls
1254 the signedness and is used to check overflow or if HIGH is set. */
1255 unsigned int
1256 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1257 unsigned int op1len, const HOST_WIDE_INT *op2val,
1258 unsigned int op2len, unsigned int prec, signop sgn,
1259 bool *overflow, bool high)
1261 unsigned HOST_WIDE_INT o0, o1, k, t;
1262 unsigned int i;
1263 unsigned int j;
1264 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1265 unsigned int half_blocks_needed = blocks_needed * 2;
1266 /* The sizes here are scaled to support a 2x largest mode by 2x
1267 largest mode yielding a 4x largest mode result. This is what is
1268 needed by vpn. */
1270 unsigned HOST_HALF_WIDE_INT
1271 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1272 unsigned HOST_HALF_WIDE_INT
1273 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1274 /* The '2' in 'R' is because we are internally doing a full
1275 multiply. */
1276 unsigned HOST_HALF_WIDE_INT
1277 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1278 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1280 /* If the top level routine did not really pass in an overflow, then
1281 just make sure that we never attempt to set it. */
1282 bool needs_overflow = (overflow != 0);
1283 if (needs_overflow)
1284 *overflow = false;
1286 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1287 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1289 /* This is a surprisingly common case, so do it first. */
1290 if (op1 == 0 || op2 == 0)
1292 val[0] = 0;
1293 return 1;
1296 #ifdef umul_ppmm
1297 if (sgn == UNSIGNED)
1299 /* If the inputs are single HWIs and the output has room for at
1300 least two HWIs, we can use umul_ppmm directly. */
1301 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1302 && wi::fits_uhwi_p (op1)
1303 && wi::fits_uhwi_p (op2))
1305 /* This case never overflows. */
1306 if (high)
1308 val[0] = 0;
1309 return 1;
1311 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1312 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1314 val[2] = 0;
1315 return 3;
1317 return 1 + (val[1] != 0 || val[0] < 0);
1319 /* Likewise if the output is a full single HWI, except that the
1320 upper HWI of the result is only used for determining overflow.
1321 (We handle this case inline when overflow isn't needed.) */
1322 else if (prec == HOST_BITS_PER_WIDE_INT)
1324 unsigned HOST_WIDE_INT upper;
1325 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1326 if (needs_overflow)
1327 *overflow = (upper != 0);
1328 if (high)
1329 val[0] = upper;
1330 return 1;
1333 #endif
1335 /* Handle multiplications by 1. */
1336 if (op1 == 1)
1338 if (high)
1340 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1341 return 1;
1343 for (i = 0; i < op2len; i++)
1344 val[i] = op2val[i];
1345 return op2len;
1347 if (op2 == 1)
1349 if (high)
1351 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1352 return 1;
1354 for (i = 0; i < op1len; i++)
1355 val[i] = op1val[i];
1356 return op1len;
1359 /* If we need to check for overflow, we can only do half wide
1360 multiplies quickly because we need to look at the top bits to
1361 check for the overflow. */
1362 if ((high || needs_overflow)
1363 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1365 unsigned HOST_WIDE_INT r;
1367 if (sgn == SIGNED)
1369 o0 = op1.to_shwi ();
1370 o1 = op2.to_shwi ();
1372 else
1374 o0 = op1.to_uhwi ();
1375 o1 = op2.to_uhwi ();
1378 r = o0 * o1;
1379 if (needs_overflow)
1381 if (sgn == SIGNED)
1383 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1384 *overflow = true;
1386 else
1388 if ((r >> prec) != 0)
1389 *overflow = true;
1392 val[0] = high ? r >> prec : r;
1393 return 1;
1396 /* We do unsigned mul and then correct it. */
1397 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1398 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1400 /* The 2 is for a full mult. */
1401 memset (r, 0, half_blocks_needed * 2
1402 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1404 for (j = 0; j < half_blocks_needed; j++)
1406 k = 0;
1407 for (i = 0; i < half_blocks_needed; i++)
1409 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1410 + r[i + j] + k);
1411 r[i + j] = t & HALF_INT_MASK;
1412 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1414 r[j + half_blocks_needed] = k;
1417 /* We did unsigned math above. For signed we must adjust the
1418 product (assuming we need to see that). */
1419 if (sgn == SIGNED && (high || needs_overflow))
1421 unsigned HOST_WIDE_INT b;
1422 if (wi::neg_p (op1))
1424 b = 0;
1425 for (i = 0; i < half_blocks_needed; i++)
1427 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1428 - (unsigned HOST_WIDE_INT)v[i] - b;
1429 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1430 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1433 if (wi::neg_p (op2))
1435 b = 0;
1436 for (i = 0; i < half_blocks_needed; i++)
1438 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1439 - (unsigned HOST_WIDE_INT)u[i] - b;
1440 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1441 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1446 if (needs_overflow)
1448 HOST_WIDE_INT top;
1450 /* For unsigned, overflow is true if any of the top bits are set.
1451 For signed, overflow is true if any of the top bits are not equal
1452 to the sign bit. */
1453 if (sgn == UNSIGNED)
1454 top = 0;
1455 else
1457 top = r[(half_blocks_needed) - 1];
1458 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1459 top &= mask;
1462 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1463 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1464 *overflow = true;
1467 if (high)
1469 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1470 wi_pack ((unsigned HOST_WIDE_INT *) val,
1471 &r[half_blocks_needed], half_blocks_needed);
1472 return canonize (val, blocks_needed, prec);
1474 else
1476 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1477 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1478 return canonize (val, blocks_needed, prec);
1482 /* Compute the population count of X. */
1483 int
1484 wi::popcount (const wide_int_ref &x)
1486 unsigned int i;
1487 int count;
1489 /* The high order block is special if it is the last block and the
1490 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1491 have to clear out any ones above the precision before doing
1492 popcount on this block. */
1493 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1494 unsigned int stop = x.len;
1495 if (count < 0)
1497 count = popcount_hwi (x.uhigh () << -count);
1498 stop -= 1;
1500 else
1502 if (x.sign_mask () >= 0)
1503 count = 0;
1506 for (i = 0; i < stop; ++i)
1507 count += popcount_hwi (x.val[i]);
1509 return count;
1512 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1513 whether the result overflows when OP0 and OP1 are treated as having
1514 signedness SGN. Return the number of blocks in VAL. */
1515 unsigned int
1516 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1517 unsigned int op0len, const HOST_WIDE_INT *op1,
1518 unsigned int op1len, unsigned int prec,
1519 signop sgn, bool *overflow)
1521 unsigned HOST_WIDE_INT o0 = 0;
1522 unsigned HOST_WIDE_INT o1 = 0;
1523 unsigned HOST_WIDE_INT x = 0;
1524 /* We implement subtraction as an in place negate and add. Negation
1525 is just inversion and add 1, so we can do the add of 1 by just
1526 starting the borrow in of the first element at 1. */
1527 unsigned HOST_WIDE_INT borrow = 0;
1528 unsigned HOST_WIDE_INT old_borrow = 0;
1530 unsigned HOST_WIDE_INT mask0, mask1;
1531 unsigned int i;
1533 unsigned int len = MAX (op0len, op1len);
1534 mask0 = -top_bit_of (op0, op0len, prec);
1535 mask1 = -top_bit_of (op1, op1len, prec);
1537 /* Subtract all of the explicitly defined elements. */
1538 for (i = 0; i < len; i++)
1540 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1541 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1542 x = o0 - o1 - borrow;
1543 val[i] = x;
1544 old_borrow = borrow;
1545 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1548 if (len * HOST_BITS_PER_WIDE_INT < prec)
1550 val[len] = mask0 - mask1 - borrow;
1551 len++;
1552 if (overflow)
1553 *overflow = false;
1555 else if (overflow)
1557 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1558 if (sgn == SIGNED)
1560 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1561 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1563 else
1565 /* Put the MSBs of X and O0 in the top of the HWI. */
1566 x <<= shift;
1567 o0 <<= shift;
1568 if (old_borrow)
1569 *overflow = (x >= o0);
1570 else
1571 *overflow = (x > o0);
1575 return canonize (val, len, prec);
1580 * Division and Mod
1583 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1584 algorithm is a small modification of the algorithm in Hacker's
1585 Delight by Warren, which itself is a small modification of Knuth's
1586 algorithm. M is the number of significant elements of B_DIVIDEND
1587 (Knuth's U); there needs to be at least one extra element of
1588 B_DIVIDEND allocated. N is the number of elements of B_DIVISOR. */
1589 static void
1590 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1591 unsigned HOST_HALF_WIDE_INT *b_remainder,
1592 unsigned HOST_HALF_WIDE_INT *b_dividend,
1593 unsigned HOST_HALF_WIDE_INT *b_divisor,
1594 int m, int n)
1596 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1597 HOST_WIDE_INT and stored in the lower bits of each word. This
1598 algorithm should work properly on both 32 and 64 bit
1599 machines. */
1600 unsigned HOST_WIDE_INT b
1601 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1602 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1603 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1604 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1605 HOST_WIDE_INT t, k;
1606 int i, j, s;
1608 /* Single digit divisor. */
1609 if (n == 1)
1611 k = 0;
1612 for (j = m - 1; j >= 0; j--)
1614 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1615 k = ((k * b + b_dividend[j])
1616 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1617 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1619 b_remainder[0] = k;
1620 return;
1623 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1625 if (s)
1627 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1628 algorithm, we can overwrite b_dividend and b_divisor, so we do
1629 that. */
1630 for (i = n - 1; i > 0; i--)
1631 b_divisor[i] = (b_divisor[i] << s)
1632 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1633 b_divisor[0] = b_divisor[0] << s;
1635 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1636 for (i = m - 1; i > 0; i--)
1637 b_dividend[i] = (b_dividend[i] << s)
1638 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1639 b_dividend[0] = b_dividend[0] << s;
1642 /* Main loop. */
1643 for (j = m - n; j >= 0; j--)
1645 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1646 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1647 again:
1648 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1650 qhat -= 1;
1651 rhat += b_divisor[n-1];
1652 if (rhat < b)
1653 goto again;
1656 /* Multiply and subtract. */
1657 k = 0;
1658 for (i = 0; i < n; i++)
1660 p = qhat * b_divisor[i];
1661 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1662 b_dividend[i + j] = t;
1663 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1664 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1666 t = b_dividend[j+n] - k;
1667 b_dividend[j+n] = t;
1669 b_quotient[j] = qhat;
1670 if (t < 0)
1672 b_quotient[j] -= 1;
1673 k = 0;
1674 for (i = 0; i < n; i++)
1676 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1677 b_dividend[i+j] = t;
1678 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1680 b_dividend[j+n] += k;
1683 if (s)
1684 for (i = 0; i < n; i++)
1685 b_remainder[i] = (b_dividend[i] >> s)
1686 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1687 else
1688 for (i = 0; i < n; i++)
1689 b_remainder[i] = b_dividend[i];
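/* Editorial note (illustrative only): this is schoolbook long division in
   base b == 2^HOST_BITS_PER_HALF_WIDE_INT.  For instance, dividing the
   two-digit dividend { 0x00000000, 0x00000005 } (the value 5 * 2^32) by the
   single-digit divisor { 2 } takes the N == 1 path and produces the quotient
   digits { 0x80000000, 0x00000002 } (5 * 2^31) with remainder digit 0.  */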
1693 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1694 the result. If QUOTIENT is nonnull, store the value of the quotient
1695 there and return the number of blocks in it. The return value is
1696 not defined otherwise. If REMAINDER is nonnull, store the value
1697 of the remainder there and store the number of blocks in
1698 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1699 the division overflowed. */
1700 unsigned int
1701 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1702 HOST_WIDE_INT *remainder,
1703 const HOST_WIDE_INT *dividend_val,
1704 unsigned int dividend_len, unsigned int dividend_prec,
1705 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1706 unsigned int divisor_prec, signop sgn,
1707 bool *oflow)
1709 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1710 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1711 unsigned HOST_HALF_WIDE_INT
1712 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1713 unsigned HOST_HALF_WIDE_INT
1714 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1715 unsigned HOST_HALF_WIDE_INT
1716 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1717 unsigned HOST_HALF_WIDE_INT
1718 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1719 unsigned int m, n;
1720 bool dividend_neg = false;
1721 bool divisor_neg = false;
1722 bool overflow = false;
1723 wide_int neg_dividend, neg_divisor;
1725 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1726 dividend_prec);
1727 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1728 divisor_prec);
1729 if (divisor == 0)
1730 overflow = true;
1732 /* The smallest signed number / -1 causes overflow. The dividend_len
1733 check is for speed rather than correctness. */
1734 if (sgn == SIGNED
1735 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1736 && divisor == -1
1737 && wi::only_sign_bit_p (dividend))
1738 overflow = true;
1740 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1741 (signed min / -1) has the same representation as the original dividend.
1742 We have traditionally made division by zero act as division by one,
1743 so there too we use the original dividend. */
1744 if (overflow)
1746 if (remainder)
1748 *remainder_len = 1;
1749 remainder[0] = 0;
1751 if (oflow != 0)
1752 *oflow = true;
1753 if (quotient)
1754 for (unsigned int i = 0; i < dividend_len; ++i)
1755 quotient[i] = dividend_val[i];
1756 return dividend_len;
1759 if (oflow)
1760 *oflow = false;
1762 /* Do it on the host if you can. */
1763 if (sgn == SIGNED
1764 && wi::fits_shwi_p (dividend)
1765 && wi::fits_shwi_p (divisor))
1767 HOST_WIDE_INT o0 = dividend.to_shwi ();
1768 HOST_WIDE_INT o1 = divisor.to_shwi ();
1770 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1772 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1773 if (quotient)
1775 quotient[0] = HOST_WIDE_INT_MIN;
1776 quotient[1] = 0;
1778 if (remainder)
1780 remainder[0] = 0;
1781 *remainder_len = 1;
1783 return 2;
1785 else
1787 if (quotient)
1788 quotient[0] = o0 / o1;
1789 if (remainder)
1791 remainder[0] = o0 % o1;
1792 *remainder_len = 1;
1794 return 1;
1798 if (sgn == UNSIGNED
1799 && wi::fits_uhwi_p (dividend)
1800 && wi::fits_uhwi_p (divisor))
1802 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1803 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1805 if (quotient)
1806 quotient[0] = o0 / o1;
1807 if (remainder)
1809 remainder[0] = o0 % o1;
1810 *remainder_len = 1;
1812 return 1;
1815 /* Make the divisor and dividend positive and remember what we
1816 did. */
1817 if (sgn == SIGNED)
1819 if (wi::neg_p (dividend))
1821 neg_dividend = -dividend;
1822 dividend = neg_dividend;
1823 dividend_neg = true;
1825 if (wi::neg_p (divisor))
1827 neg_divisor = -divisor;
1828 divisor = neg_divisor;
1829 divisor_neg = true;
1833 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1834 dividend_blocks_needed, dividend_prec, sgn);
1835 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1836 divisor_blocks_needed, divisor_prec, sgn);
1838 m = dividend_blocks_needed;
1839 b_dividend[m] = 0;
1840 while (m > 1 && b_dividend[m - 1] == 0)
1841 m--;
1843 n = divisor_blocks_needed;
1844 while (n > 1 && b_divisor[n - 1] == 0)
1845 n--;
1847 memset (b_quotient, 0, sizeof (b_quotient));
1849 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1851 unsigned int quotient_len = 0;
1852 if (quotient)
1854 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1855 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1856 /* The quotient is neg if exactly one of the divisor or dividend is
1857 neg. */
1858 if (dividend_neg != divisor_neg)
1859 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1860 quotient_len, dividend_prec,
1861 UNSIGNED, 0);
1864 if (remainder)
1866 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1867 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1868 /* The remainder is always the same sign as the dividend. */
1869 if (dividend_neg)
1870 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1871 *remainder_len, dividend_prec,
1872 UNSIGNED, 0);
1875 return quotient_len;
1879 * Shifting, rotating and extraction.
1882 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1883 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1884 unsigned int
1885 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1886 unsigned int xlen, unsigned int precision,
1887 unsigned int shift)
1889 /* Split the shift into a whole-block shift and a subblock shift. */
1890 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1891 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1893 /* The whole-block shift fills with zeros. */
1894 unsigned int len = BLOCKS_NEEDED (precision);
1895 for (unsigned int i = 0; i < skip; ++i)
1896 val[i] = 0;
1898 /* It's easier to handle the simple block case specially. */
1899 if (small_shift == 0)
1900 for (unsigned int i = skip; i < len; ++i)
1901 val[i] = safe_uhwi (xval, xlen, i - skip);
1902 else
1904 /* The first unfilled output block is a left shift of the first
1905 block in XVAL. The other output blocks contain bits from two
1906 consecutive input blocks. */
1907 unsigned HOST_WIDE_INT carry = 0;
1908 for (unsigned int i = skip; i < len; ++i)
1910 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1911 val[i] = (x << small_shift) | carry;
1912 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1915 return canonize (val, len, precision);
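/* Editorial example (illustrative, 64-bit blocks): a left shift of 68 splits
   into SKIP == 1 and SMALL_SHIFT == 4, so block 0 of the result is zero,
   block 1 is XVAL[0] << 4, and block 2 combines the four bits carried out of
   XVAL[0] with XVAL[1] << 4, and so on.  */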
1918 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1919 number of blocks in VAL. The input has XPRECISION bits and the
1920 output has XPRECISION - SHIFT bits. */
1921 static unsigned int
1922 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1923 unsigned int xlen, unsigned int xprecision,
1924 unsigned int shift)
1926 /* Split the shift into a whole-block shift and a subblock shift. */
1927 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1928 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1930 /* Work out how many blocks are needed to store the significant bits
1931 (excluding the upper zeros or signs). */
1932 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1934 /* It's easier to handle the simple block case specially. */
1935 if (small_shift == 0)
1936 for (unsigned int i = 0; i < len; ++i)
1937 val[i] = safe_uhwi (xval, xlen, i + skip);
1938 else
1940 /* Each output block but the last is a combination of two input blocks.
1941 The last block is a right shift of the last block in XVAL. */
1942 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1943 for (unsigned int i = 0; i < len; ++i)
1945 val[i] = curr >> small_shift;
1946 curr = safe_uhwi (xval, xlen, i + skip + 1);
1947 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1950 return len;
1953 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1954 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1955 VAL has PRECISION bits. */
1956 unsigned int
1957 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1958 unsigned int xlen, unsigned int xprecision,
1959 unsigned int precision, unsigned int shift)
1961 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1963 /* The value we just created has precision XPRECISION - SHIFT.
1964 Zero-extend it to wider precisions. */
1965 if (precision > xprecision - shift)
1967 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1968 if (small_prec)
1969 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1970 else if (val[len - 1] < 0)
1972 /* Add a new block with a zero. */
1973 val[len++] = 0;
1974 return len;
1977 return canonize (val, len, precision);
1980 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1981 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1982 VAL has PRECISION bits. */
1983 unsigned int
1984 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1985 unsigned int xlen, unsigned int xprecision,
1986 unsigned int precision, unsigned int shift)
1988 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1990 /* The value we just created has precision XPRECISION - SHIFT.
1991 Sign-extend it to wider types. */
1992 if (precision > xprecision - shift)
1994 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1995 if (small_prec)
1996 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1998 return canonize (val, len, precision);
2001 /* Return the number of leading (upper) zeros in X. */
2002 int
2003 wi::clz (const wide_int_ref &x)
2005 /* Calculate how many bits there are above the highest represented block. */
2006 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2008 unsigned HOST_WIDE_INT high = x.uhigh ();
2009 if (count < 0)
2010 /* The upper -COUNT bits of HIGH are not part of the value.
2011 Clear them out. */
2012 high = (high << -count) >> -count;
2013 else if (x.sign_mask () < 0)
2014 /* The upper bit is set, so there are no leading zeros. */
2015 return 0;
2017 /* We don't need to look below HIGH. Either HIGH is nonzero,
2018 or the top bit of the block below is nonzero; clz_hwi is
2019 HOST_BITS_PER_WIDE_INT in the latter case. */
2020 return count + clz_hwi (high);
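/* Editorial example (illustrative): for a 128-bit value whose only stored
   block is 1 (len == 1), COUNT is 128 - 64 == 64, the sign mask is zero, and
   the result is 64 + clz_hwi (1) == 64 + 63 == 127 leading zeros.  */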
2023 /* Return the number of redundant sign bits in X. (That is, the number
2024 of bits immediately below the sign bit that have the same value as
2025 the sign bit.) */
2026 int
2027 wi::clrsb (const wide_int_ref &x)
2029 /* Calculate how many bits there are above the highest represented block. */
2030 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2032 unsigned HOST_WIDE_INT high = x.uhigh ();
2033 unsigned HOST_WIDE_INT mask = -1;
2034 if (count < 0)
2036 /* The upper -COUNT bits of HIGH are not part of the value.
2037 Clear them from both MASK and HIGH. */
2038 mask >>= -count;
2039 high &= mask;
2042 /* If the top bit is 1, count the number of leading 1s. If the top
2043 bit is zero, count the number of leading zeros. */
2044 if (high > mask / 2)
2045 high ^= mask;
2047 /* There are no sign bits below the top block, so we don't need to look
2048 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2049 HIGH is 0. */
2050 return count + clz_hwi (high) - 1;
2053 /* Return the number of trailing (lower) zeros in X. */
2054 int
2055 wi::ctz (const wide_int_ref &x)
2057 if (x.len == 1 && x.ulow () == 0)
2058 return x.precision;
2060 /* Having dealt with the zero case, there must be a block with a
2061 nonzero bit. We don't care about the bits above the first 1. */
2062 unsigned int i = 0;
2063 while (x.val[i] == 0)
2064 ++i;
2065 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2068 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2069 return -1. */
2070 int
2071 wi::exact_log2 (const wide_int_ref &x)
2073 /* Reject cases where there are implicit -1 blocks above HIGH. */
2074 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2075 return -1;
2077 /* Set CRUX to the index of the entry that should be nonzero.
2078 If the top block is zero then the next lowest block (if any)
2079 must have the high bit set. */
2080 unsigned int crux = x.len - 1;
2081 if (crux > 0 && x.val[crux] == 0)
2082 crux -= 1;
2084 /* Check that all lower blocks are zero. */
2085 for (unsigned int i = 0; i < crux; ++i)
2086 if (x.val[i] != 0)
2087 return -1;
2089 /* Get a zero-extended form of block CRUX. */
2090 unsigned HOST_WIDE_INT hwi = x.val[crux];
2091 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2092 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2094 /* Now it's down to whether HWI is a power of 2. */
2095 int res = ::exact_log2 (hwi);
2096 if (res >= 0)
2097 res += crux * HOST_BITS_PER_WIDE_INT;
2098 return res;
2101 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2102 int
2103 wi::floor_log2 (const wide_int_ref &x)
2105 return x.precision - 1 - clz (x);
2108 /* Return the index of the first (lowest) set bit in X, counting from 1.
2109 Return 0 if X is 0. */
2110 int
2111 wi::ffs (const wide_int_ref &x)
2113 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2116 /* Return true if sign-extending X to have precision PRECISION would give
2117 the minimum signed value at that precision. */
2118 bool
2119 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2121 return ctz (x) + 1 == int (precision);
2124 /* Return true if X represents the minimum signed value. */
2125 bool
2126 wi::only_sign_bit_p (const wide_int_ref &x)
2128 return only_sign_bit_p (x, x.precision);
2132 * Private utilities.
2135 void gt_ggc_mx (widest_int *) { }
2136 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2137 void gt_pch_nx (widest_int *) { }
2139 template void wide_int::dump () const;
2140 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2141 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2142 template void offset_int::dump () const;
2143 template void widest_int::dump () const;