gcc/wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "vec.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "inchash.h"
30 #include "dumpfile.h"
33 #define HOST_BITS_PER_HALF_WIDE_INT 32
34 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
35 # define HOST_HALF_WIDE_INT long
36 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
37 # define HOST_HALF_WIDE_INT int
38 #else
39 #error Please add support for HOST_HALF_WIDE_INT
40 #endif
42 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
43 /* Do not include longlong.h when the compiler is clang-based. See PR61146. */
44 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
45 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
46 typedef unsigned HOST_WIDE_INT UWtype;
47 typedef unsigned int UQItype __attribute__ ((mode (QI)));
48 typedef unsigned int USItype __attribute__ ((mode (SI)));
49 typedef unsigned int UDItype __attribute__ ((mode (DI)));
50 #if W_TYPE_SIZE == 32
51 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
52 #else
53 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
54 #endif
55 #include "longlong.h"
56 #endif
58 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
61 * Internal utilities.
64 /* Quantities to deal with values that hold half of a wide int. Used
65 in multiply and divide. */
66 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
68 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
69 #define BLOCKS_NEEDED(PREC) \
70 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
71 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
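/* As an illustration (assuming a 64-bit HOST_WIDE_INT, the common case):

     BLOCKS_NEEDED (1)   == 1
     BLOCKS_NEEDED (64)  == 1
     BLOCKS_NEEDED (65)  == 2
     BLOCKS_NEEDED (128) == 2

   and SIGN_MASK (X) is -1 when X has its top bit set and 0 otherwise,
   e.g. SIGN_MASK (-5) == -1 and SIGN_MASK (5) == 0.  */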
73 /* Return the value VAL[I] if I < LEN; otherwise, return 0 or -1
74 based on the top existing bit of VAL. */
76 static unsigned HOST_WIDE_INT
77 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
79 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
82 /* Convert the integer in VAL to canonical form, returning its new length.
83 LEN is the number of blocks currently in VAL and PRECISION is the number
84 of bits in the integer it represents.
86 This function only changes the representation, not the value. */
87 static unsigned int
88 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
90 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
91 HOST_WIDE_INT top;
92 int i;
94 if (len > blocks_needed)
95 len = blocks_needed;
97 if (len == 1)
98 return len;
100 top = val[len - 1];
101 if (len * HOST_BITS_PER_WIDE_INT > precision)
102 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
103 if (top != 0 && top != (HOST_WIDE_INT)-1)
104 return len;
106 /* At this point we know that the top is either 0 or -1. Find the
107 first block that is not a copy of this. */
108 for (i = len - 2; i >= 0; i--)
110 HOST_WIDE_INT x = val[i];
111 if (x != top)
113 if (SIGN_MASK (x) == top)
114 return i + 1;
116 /* We need an extra block because the top bit of block I does
117 not match the extension. */
118 return i + 2;
122 /* The number is 0 or -1. */
123 return 1;
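/* A worked example, assuming a 64-bit HOST_WIDE_INT and blocks listed
   from least significant to most significant:

     canonize ({ 5, 0 }, 2, 128)   returns 1; the upper zero block is a
       redundant sign extension of 5.
     canonize ({ -1, -1 }, 2, 128) returns 1; the value is simply -1.
     canonize ({ 5, -1 }, 2, 128)  returns 2; the upper block does not
       match the sign extension of the lower one, so both are kept.  */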
127 * Conversion routines in and out of wide_int.
130 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
131 result for an integer with precision PRECISION. Return the length
132 of VAL (after any canonization). */
133 unsigned int
134 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
135 unsigned int xlen, unsigned int precision, bool need_canon)
137 for (unsigned i = 0; i < xlen; i++)
138 val[i] = xval[i];
139 return need_canon ? canonize (val, xlen, precision) : xlen;
142 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
143 read according to the byte and word endianness of the target.
144 Only the lower BUFFER_LEN bytes of the result are set; the remaining
145 high bytes are cleared. */
146 wide_int
147 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
149 unsigned int precision = buffer_len * BITS_PER_UNIT;
150 wide_int result = wide_int::create (precision);
151 unsigned int words = buffer_len / UNITS_PER_WORD;
153 /* We have to clear all the bits ourselves, as we merely OR in values
154 below. */
155 unsigned int len = BLOCKS_NEEDED (precision);
156 HOST_WIDE_INT *val = result.write_val ();
157 for (unsigned int i = 0; i < len; ++i)
158 val[i] = 0;
160 for (unsigned int byte = 0; byte < buffer_len; byte++)
162 unsigned int offset;
163 unsigned int index;
164 unsigned int bitpos = byte * BITS_PER_UNIT;
165 unsigned HOST_WIDE_INT value;
167 if (buffer_len > UNITS_PER_WORD)
169 unsigned int word = byte / UNITS_PER_WORD;
171 if (WORDS_BIG_ENDIAN)
172 word = (words - 1) - word;
174 offset = word * UNITS_PER_WORD;
176 if (BYTES_BIG_ENDIAN)
177 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
178 else
179 offset += byte % UNITS_PER_WORD;
181 else
182 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
184 value = (unsigned HOST_WIDE_INT) buffer[offset];
186 index = bitpos / HOST_BITS_PER_WIDE_INT;
187 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
190 result.set_len (canonize (val, len, precision));
192 return result;
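/* For example, on a little-endian target with 8-bit units and 64-bit words,
   wi::from_buffer applied to the two bytes { 0x01, 0x02 } yields a wide_int
   of precision 16 whose value is 0x0201 (513); the first byte lands in the
   low-order bits of block 0.  */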
195 /* Sets RESULT from X; the sign is taken according to SGN. */
196 void
197 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
199 int len = x.get_len ();
200 const HOST_WIDE_INT *v = x.get_val ();
201 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
203 if (wi::neg_p (x, sgn))
205 /* We use ones' complement to avoid the -0x80..0 edge case that
206 negation won't work on. */
207 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
208 for (int i = 0; i < len; i++)
209 t[i] = ~v[i];
210 if (excess > 0)
211 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
212 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
213 mpz_com (result, result);
215 else if (excess > 0)
217 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218 for (int i = 0; i < len - 1; i++)
219 t[i] = v[i];
220 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223 else
224 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
227 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
228 values of X will be wrapped; otherwise, they will be set to the
229 appropriate minimum or maximum TYPE bound. */
230 wide_int
231 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
233 size_t count, numb;
234 unsigned int prec = TYPE_PRECISION (type);
235 wide_int res = wide_int::create (prec);
237 if (!wrap)
239 mpz_t min, max;
241 mpz_init (min);
242 mpz_init (max);
243 get_type_static_bounds (type, min, max);
245 if (mpz_cmp (x, min) < 0)
246 mpz_set (x, min);
247 else if (mpz_cmp (x, max) > 0)
248 mpz_set (x, max);
250 mpz_clear (min);
251 mpz_clear (max);
254 /* Determine the number of unsigned HOST_WIDE_INTs that are required
255 for representing the absolute value. The code to calculate count is
256 extracted from the GMP manual, section "Integer Import and Export":
257 http://gmplib.org/manual/Integer-Import-and-Export.html */
258 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
259 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
260 HOST_WIDE_INT *val = res.write_val ();
261 /* Read the absolute value.
263 Write directly to the wide_int storage if possible, otherwise leave
264 GMP to allocate the memory for us. It might be slightly more efficient
265 to use mpz_tdiv_r_2exp for the latter case, but the situation is
266 pathological and it seems safer to operate on the original mpz value
267 in all cases. */
268 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
269 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
270 if (count < 1)
272 val[0] = 0;
273 count = 1;
275 count = MIN (count, BLOCKS_NEEDED (prec));
276 if (valres != val)
278 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
279 free (valres);
281 /* Zero-extend the absolute value to PREC bits. */
282 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
283 val[count++] = 0;
284 else
285 count = canonize (val, count, prec);
286 res.set_len (count);
288 if (mpz_sgn (x) < 0)
289 res = -res;
291 return res;
295 * Largest and smallest values in a mode.
298 /* Return the largest SGNed number that is representable in PRECISION bits.
300 TODO: There is still code from the double_int era that tries to
301 make up for the fact that double_ints could not represent the
302 min and max values of all types. This code should be removed
303 because the min and max values can always be represented in
304 wide_ints and int-csts. */
305 wide_int
306 wi::max_value (unsigned int precision, signop sgn)
308 gcc_checking_assert (precision != 0);
309 if (sgn == UNSIGNED)
310 /* The unsigned max is just all ones. */
311 return shwi (-1, precision);
312 else
313 /* The signed max is all ones except the top bit. This must be
314 explicitly represented. */
315 return mask (precision - 1, false, precision);
318 /* Return the smallest SGNed number that is representable in PRECISION bits. */
319 wide_int
320 wi::min_value (unsigned int precision, signop sgn)
322 gcc_checking_assert (precision != 0);
323 if (sgn == UNSIGNED)
324 return uhwi (0, precision);
325 else
326 /* The signed min is all zeros except the top bit. This must be
327 explicitly represented. */
328 return wi::set_bit_in_zero (precision - 1, precision);
332 * Public utilities.
335 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
336 signedness SGN, to an integer that has PRECISION bits. Store the blocks
337 in VAL and return the number of blocks used.
339 This function can handle both extension (PRECISION > XPRECISION)
340 and truncation (PRECISION < XPRECISION). */
341 unsigned int
342 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
343 unsigned int xlen, unsigned int xprecision,
344 unsigned int precision, signop sgn)
346 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
347 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
348 for (unsigned i = 0; i < len; i++)
349 val[i] = xval[i];
351 if (precision > xprecision)
353 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
355 /* Expanding. */
356 if (sgn == UNSIGNED)
358 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
359 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
360 else if (val[len - 1] < 0)
362 while (len < BLOCKS_NEEDED (xprecision))
363 val[len++] = -1;
364 if (small_xprecision)
365 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
366 else
367 val[len++] = 0;
370 else
372 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
373 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
376 len = canonize (val, len, precision);
378 return len;
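/* As an example of the expansion paths above, take the 8-bit value 0xff,
   stored canonically as the single block -1, and force it to 32 bits:
   with SGN == UNSIGNED the block is zero-extended from bit 8, giving the
   value 0xff; with SGN == SIGNED it is sign-extended and remains -1
   (i.e. 0xffffffff at 32-bit precision).  */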
381 /* This function hides the fact that we cannot rely on the bits beyond
382 the precision. This issue comes up in the relational comparisons
383 where we do allow comparisons of values of different precisions. */
384 static inline HOST_WIDE_INT
385 selt (const HOST_WIDE_INT *a, unsigned int len,
386 unsigned int blocks_needed, unsigned int small_prec,
387 unsigned int index, signop sgn)
389 HOST_WIDE_INT val;
390 if (index < len)
391 val = a[index];
392 else if (index < blocks_needed || sgn == SIGNED)
393 /* Signed or within the precision. */
394 val = SIGN_MASK (a[len - 1]);
395 else
396 /* Unsigned extension beyond the precision. */
397 val = 0;
399 if (small_prec && index == blocks_needed - 1)
400 return (sgn == SIGNED
401 ? sext_hwi (val, small_prec)
402 : zext_hwi (val, small_prec));
403 else
404 return val;
407 /* Find the highest bit represented in a wide int. This will in
408 general have the same value as the sign bit. */
409 static inline HOST_WIDE_INT
410 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
412 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
413 unsigned HOST_WIDE_INT val = a[len - 1];
414 if (excess > 0)
415 val <<= excess;
416 return val >> (HOST_BITS_PER_WIDE_INT - 1);
420 * Comparisons; note that only equality is an operator. The other
421 * comparisons cannot be operators since they are inherently signed or
422 * unsigned and C++ has no such operators.
425 /* Return true if OP0 == OP1. */
426 bool
427 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
428 const HOST_WIDE_INT *op1, unsigned int op1len,
429 unsigned int prec)
431 int l0 = op0len - 1;
432 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
434 if (op0len != op1len)
435 return false;
437 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
439 /* It does not matter if we zext or sext here; we just have to
440 do both the same way. */
441 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
442 return false;
443 l0--;
446 while (l0 >= 0)
447 if (op0[l0] != op1[l0])
448 return false;
449 else
450 l0--;
452 return true;
455 /* Return true if OP0 < OP1 using signed comparisons. */
456 bool
457 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
458 unsigned int precision,
459 const HOST_WIDE_INT *op1, unsigned int op1len)
461 HOST_WIDE_INT s0, s1;
462 unsigned HOST_WIDE_INT u0, u1;
463 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
464 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
465 int l = MAX (op0len - 1, op1len - 1);
467 /* Only the top block is compared as signed. The rest are unsigned
468 comparisons. */
469 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
470 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
471 if (s0 < s1)
472 return true;
473 if (s0 > s1)
474 return false;
476 l--;
477 while (l >= 0)
479 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
480 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
482 if (u0 < u1)
483 return true;
484 if (u0 > u1)
485 return false;
486 l--;
489 return false;
492 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
493 signed compares. */
494 int
495 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
496 unsigned int precision,
497 const HOST_WIDE_INT *op1, unsigned int op1len)
499 HOST_WIDE_INT s0, s1;
500 unsigned HOST_WIDE_INT u0, u1;
501 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
502 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
503 int l = MAX (op0len - 1, op1len - 1);
505 /* Only the top block is compared as signed. The rest are unsigned
506 comparisons. */
507 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
508 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
509 if (s0 < s1)
510 return -1;
511 if (s0 > s1)
512 return 1;
514 l--;
515 while (l >= 0)
517 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
518 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
520 if (u0 < u1)
521 return -1;
522 if (u0 > u1)
523 return 1;
524 l--;
527 return 0;
530 /* Return true if OP0 < OP1 using unsigned comparisons. */
531 bool
532 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
533 unsigned int precision,
534 const HOST_WIDE_INT *op1, unsigned int op1len)
536 unsigned HOST_WIDE_INT x0;
537 unsigned HOST_WIDE_INT x1;
538 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
539 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
540 int l = MAX (op0len - 1, op1len - 1);
542 while (l >= 0)
544 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
545 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
546 if (x0 < x1)
547 return true;
548 if (x0 > x1)
549 return false;
550 l--;
553 return false;
556 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
557 unsigned compares. */
558 int
559 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
560 unsigned int precision,
561 const HOST_WIDE_INT *op1, unsigned int op1len)
563 unsigned HOST_WIDE_INT x0;
564 unsigned HOST_WIDE_INT x1;
565 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
566 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
567 int l = MAX (op0len - 1, op1len - 1);
569 while (l >= 0)
571 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
572 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
573 if (x0 < x1)
574 return -1;
575 if (x0 > x1)
576 return 1;
577 l--;
580 return 0;
584 * Extension.
587 /* Sign-extend the number represented by XVAL and XLEN into VAL,
588 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
589 and VAL have PRECISION bits. */
590 unsigned int
591 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
592 unsigned int xlen, unsigned int precision, unsigned int offset)
594 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
595 /* Extending beyond the precision is a no-op. If we have only stored
596 OFFSET bits or fewer, the rest are already signs. */
597 if (offset >= precision || len >= xlen)
599 for (unsigned i = 0; i < xlen; ++i)
600 val[i] = xval[i];
601 return xlen;
603 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
604 for (unsigned int i = 0; i < len; i++)
605 val[i] = xval[i];
606 if (suboffset > 0)
608 val[len] = sext_hwi (xval[len], suboffset);
609 len += 1;
611 return canonize (val, len, precision);
614 /* Zero-extend the number represented by XVAL and XLEN into VAL,
615 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
616 and VAL have PRECISION bits. */
617 unsigned int
618 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
619 unsigned int xlen, unsigned int precision, unsigned int offset)
621 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
622 /* Extending beyond the precision is a no-op. If we have only stored
623 OFFSET bits or fewer, and the upper stored bit is zero, then there
624 is nothing to do. */
625 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
627 for (unsigned i = 0; i < xlen; ++i)
628 val[i] = xval[i];
629 return xlen;
631 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
632 for (unsigned int i = 0; i < len; i++)
633 val[i] = i < xlen ? xval[i] : -1;
634 if (suboffset > 0)
635 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
636 else
637 val[len] = 0;
638 return canonize (val, len + 1, precision);
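/* To illustrate the two routines above, start from the single block 0xff
   at 32-bit precision:

     sext_large (..., { 0xff }, 1, 32, 8)  produces the block -1, i.e. the
       value -1 (bit 7 is treated as the sign bit of the low 8 bits).
     zext_large (..., { 0xff }, 1, 32, 8)  leaves the value as 0xff, since
       everything above bit 7 is cleared.  */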
642 * Masking, inserting, shifting, rotating.
645 /* Insert WIDTH bits from Y into X starting at START. */
646 wide_int
647 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
648 unsigned int width)
650 wide_int result;
651 wide_int mask;
652 wide_int tmp;
654 unsigned int precision = x.get_precision ();
655 if (start >= precision)
656 return x;
658 gcc_checking_assert (precision >= width);
660 if (start + width >= precision)
661 width = precision - start;
663 mask = wi::shifted_mask (start, width, false, precision);
664 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
665 result = tmp & mask;
667 tmp = wi::bit_and_not (x, mask);
668 result = result | tmp;
670 return result;
673 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
674 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
675 bits. */
676 unsigned int
677 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
678 unsigned int xlen, unsigned int precision, unsigned int bit)
680 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
681 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
683 if (block + 1 >= xlen)
685 /* The operation either affects the last current block or needs
686 a new block. */
687 unsigned int len = block + 1;
688 for (unsigned int i = 0; i < len; i++)
689 val[i] = safe_uhwi (xval, xlen, i);
690 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
692 /* If the bit we just set is at the msb of the block, make sure
693 that any higher bits are zeros. */
694 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
695 val[len++] = 0;
696 return len;
698 else
700 for (unsigned int i = 0; i < xlen; i++)
701 val[i] = xval[i];
702 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
703 return canonize (val, xlen, precision);
707 /* bswap THIS. */
708 wide_int
709 wide_int_storage::bswap () const
711 wide_int result = wide_int::create (precision);
712 unsigned int i, s;
713 unsigned int len = BLOCKS_NEEDED (precision);
714 unsigned int xlen = get_len ();
715 const HOST_WIDE_INT *xval = get_val ();
716 HOST_WIDE_INT *val = result.write_val ();
718 /* This is not a well-defined operation if the precision is not a
719 multiple of 8. */
720 gcc_assert ((precision & 0x7) == 0);
722 for (i = 0; i < len; i++)
723 val[i] = 0;
725 /* Only swap the bytes that are not the padding. */
726 for (s = 0; s < precision; s += 8)
728 unsigned int d = precision - s - 8;
729 unsigned HOST_WIDE_INT byte;
731 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
732 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
734 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
736 block = d / HOST_BITS_PER_WIDE_INT;
737 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
739 val[block] |= byte << offset;
742 result.set_len (canonize (val, len, precision));
743 return result;
746 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
747 above that up to PREC are zeros. The result is inverted if NEGATE
748 is true. Return the number of blocks in VAL. */
749 unsigned int
750 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
751 unsigned int prec)
753 if (width >= prec)
755 val[0] = negate ? 0 : -1;
756 return 1;
758 else if (width == 0)
760 val[0] = negate ? -1 : 0;
761 return 1;
764 unsigned int i = 0;
765 while (i < width / HOST_BITS_PER_WIDE_INT)
766 val[i++] = negate ? 0 : -1;
768 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
769 if (shift != 0)
771 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
772 val[i++] = negate ? ~last : last;
774 else
775 val[i++] = negate ? -1 : 0;
777 return i;
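/* Two small examples of the mask layout, assuming a 64-bit HOST_WIDE_INT:

     wi::mask (val, 5, false, 32)  sets val[0] = 0x1f and returns 1.
     wi::mask (val, 5, true, 32)   sets val[0] = ~0x1f (the low 32 bits
       being 0xffffffe0) and returns 1.  */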
780 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
781 bits are ones, and the bits above that up to PREC are zeros. The result
782 is inverted if NEGATE is true. Return the number of blocks in VAL. */
783 unsigned int
784 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
785 bool negate, unsigned int prec)
787 if (start >= prec || width == 0)
789 val[0] = negate ? -1 : 0;
790 return 1;
793 if (width > prec - start)
794 width = prec - start;
795 unsigned int end = start + width;
797 unsigned int i = 0;
798 while (i < start / HOST_BITS_PER_WIDE_INT)
799 val[i++] = negate ? -1 : 0;
801 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
802 if (shift)
804 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
805 shift += width;
806 if (shift < HOST_BITS_PER_WIDE_INT)
808 /* case 000111000 */
809 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
810 val[i++] = negate ? ~block : block;
811 return i;
813 else
814 /* ...111000 */
815 val[i++] = negate ? block : ~block;
818 while (i < end / HOST_BITS_PER_WIDE_INT)
819 /* 1111111 */
820 val[i++] = negate ? 0 : -1;
822 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
823 if (shift != 0)
825 /* 000011111 */
826 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
827 val[i++] = negate ? ~block : block;
829 else if (end < prec)
830 val[i++] = negate ? -1 : 0;
832 return i;
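/* For instance, wi::shifted_mask (val, 4, 8, false, 32) produces the single
   block 0xff0 (bits 4 through 11 set) and returns 1, while the negated form
   produces ~0xff0, whose low 32 bits are 0xfffff00f.  */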
836 * Logical operations.
839 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
840 unsigned int
841 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
842 unsigned int op0len, const HOST_WIDE_INT *op1,
843 unsigned int op1len, unsigned int prec)
845 int l0 = op0len - 1;
846 int l1 = op1len - 1;
847 bool need_canon = true;
849 unsigned int len = MAX (op0len, op1len);
850 if (l0 > l1)
852 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
853 if (op1mask == 0)
855 l0 = l1;
856 len = l1 + 1;
858 else
860 need_canon = false;
861 while (l0 > l1)
863 val[l0] = op0[l0];
864 l0--;
868 else if (l1 > l0)
870 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
871 if (op0mask == 0)
872 len = l0 + 1;
873 else
875 need_canon = false;
876 while (l1 > l0)
878 val[l1] = op1[l1];
879 l1--;
884 while (l0 >= 0)
886 val[l0] = op0[l0] & op1[l0];
887 l0--;
890 if (need_canon)
891 len = canonize (val, len, prec);
893 return len;
896 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
897 unsigned int
898 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
899 unsigned int op0len, const HOST_WIDE_INT *op1,
900 unsigned int op1len, unsigned int prec)
902 wide_int result;
903 int l0 = op0len - 1;
904 int l1 = op1len - 1;
905 bool need_canon = true;
907 unsigned int len = MAX (op0len, op1len);
908 if (l0 > l1)
910 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
911 if (op1mask != 0)
913 l0 = l1;
914 len = l1 + 1;
916 else
918 need_canon = false;
919 while (l0 > l1)
921 val[l0] = op0[l0];
922 l0--;
926 else if (l1 > l0)
928 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
929 if (op0mask == 0)
930 len = l0 + 1;
931 else
933 need_canon = false;
934 while (l1 > l0)
936 val[l1] = ~op1[l1];
937 l1--;
942 while (l0 >= 0)
944 val[l0] = op0[l0] & ~op1[l0];
945 l0--;
948 if (need_canon)
949 len = canonize (val, len, prec);
951 return len;
954 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
955 unsigned int
956 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
957 unsigned int op0len, const HOST_WIDE_INT *op1,
958 unsigned int op1len, unsigned int prec)
960 wide_int result;
961 int l0 = op0len - 1;
962 int l1 = op1len - 1;
963 bool need_canon = true;
965 unsigned int len = MAX (op0len, op1len);
966 if (l0 > l1)
968 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
969 if (op1mask != 0)
971 l0 = l1;
972 len = l1 + 1;
974 else
976 need_canon = false;
977 while (l0 > l1)
979 val[l0] = op0[l0];
980 l0--;
984 else if (l1 > l0)
986 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
987 if (op0mask != 0)
988 len = l0 + 1;
989 else
991 need_canon = false;
992 while (l1 > l0)
994 val[l1] = op1[l1];
995 l1--;
1000 while (l0 >= 0)
1002 val[l0] = op0[l0] | op1[l0];
1003 l0--;
1006 if (need_canon)
1007 len = canonize (val, len, prec);
1009 return len;
1012 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1013 unsigned int
1014 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1015 unsigned int op0len, const HOST_WIDE_INT *op1,
1016 unsigned int op1len, unsigned int prec)
1018 wide_int result;
1019 int l0 = op0len - 1;
1020 int l1 = op1len - 1;
1021 bool need_canon = true;
1023 unsigned int len = MAX (op0len, op1len);
1024 if (l0 > l1)
1026 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1027 if (op1mask == 0)
1029 l0 = l1;
1030 len = l1 + 1;
1032 else
1034 need_canon = false;
1035 while (l0 > l1)
1037 val[l0] = op0[l0];
1038 l0--;
1042 else if (l1 > l0)
1044 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1045 if (op0mask != 0)
1046 len = l0 + 1;
1047 else
1049 need_canon = false;
1050 while (l1 > l0)
1052 val[l1] = ~op1[l1];
1053 l1--;
1058 while (l0 >= 0)
1060 val[l0] = op0[l0] | ~op1[l0];
1061 l0--;
1064 if (need_canon)
1065 len = canonize (val, len, prec);
1067 return len;
1070 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1071 unsigned int
1072 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1073 unsigned int op0len, const HOST_WIDE_INT *op1,
1074 unsigned int op1len, unsigned int prec)
1076 wide_int result;
1077 int l0 = op0len - 1;
1078 int l1 = op1len - 1;
1080 unsigned int len = MAX (op0len, op1len);
1081 if (l0 > l1)
1083 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1084 while (l0 > l1)
1086 val[l0] = op0[l0] ^ op1mask;
1087 l0--;
1091 if (l1 > l0)
1093 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1094 while (l1 > l0)
1096 val[l1] = op0mask ^ op1[l1];
1097 l1--;
1101 while (l0 >= 0)
1103 val[l0] = op0[l0] ^ op1[l0];
1104 l0--;
1107 return canonize (val, len, prec);
1111 * Math.
1114 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1115 whether the result overflows when OP0 and OP1 are treated as having
1116 signedness SGN. Return the number of blocks in VAL. */
1117 unsigned int
1118 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1119 unsigned int op0len, const HOST_WIDE_INT *op1,
1120 unsigned int op1len, unsigned int prec,
1121 signop sgn, bool *overflow)
1123 unsigned HOST_WIDE_INT o0 = 0;
1124 unsigned HOST_WIDE_INT o1 = 0;
1125 unsigned HOST_WIDE_INT x = 0;
1126 unsigned HOST_WIDE_INT carry = 0;
1127 unsigned HOST_WIDE_INT old_carry = 0;
1128 unsigned HOST_WIDE_INT mask0, mask1;
1129 unsigned int i;
1131 unsigned int len = MAX (op0len, op1len);
1132 mask0 = -top_bit_of (op0, op0len, prec);
1133 mask1 = -top_bit_of (op1, op1len, prec);
1134 /* Add all of the explicitly defined elements. */
1136 for (i = 0; i < len; i++)
1138 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1139 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1140 x = o0 + o1 + carry;
1141 val[i] = x;
1142 old_carry = carry;
1143 carry = carry == 0 ? x < o0 : x <= o0;
1146 if (len * HOST_BITS_PER_WIDE_INT < prec)
1148 val[len] = mask0 + mask1 + carry;
1149 len++;
1150 if (overflow)
1151 *overflow = false;
1153 else if (overflow)
1155 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1156 if (sgn == SIGNED)
1158 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1159 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1161 else
1163 /* Put the MSBs of X and O0 in the top of the HWI. */
1164 x <<= shift;
1165 o0 <<= shift;
1166 if (old_carry)
1167 *overflow = (x <= o0);
1168 else
1169 *overflow = (x < o0);
1173 return canonize (val, len, prec);
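/* A small example of the overflow logic above: adding 1 to the all-ones
   unsigned value at 64-bit precision (op0 = { -1 }, op1 = { 1 },
   sgn == UNSIGNED) produces val[0] == 0, and the final carry makes the
   x < o0 test fire, so *overflow is set to true.  */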
1176 /* Subroutines of the multiplication and division operations. Unpack
1177 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1178 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1179 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1180 static void
1181 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1182 unsigned int in_len, unsigned int out_len,
1183 unsigned int prec, signop sgn)
1185 unsigned int i;
1186 unsigned int j = 0;
1187 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1188 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1189 HOST_WIDE_INT mask;
1191 if (sgn == SIGNED)
1193 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1194 mask &= HALF_INT_MASK;
1196 else
1197 mask = 0;
1199 for (i = 0; i < blocks_needed - 1; i++)
1201 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1202 result[j++] = x;
1203 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1206 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1207 if (small_prec)
1209 if (sgn == SIGNED)
1210 x = sext_hwi (x, small_prec);
1211 else
1212 x = zext_hwi (x, small_prec);
1214 result[j++] = x;
1215 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1217 /* Smear the sign bit. */
1218 while (j < out_len)
1219 result[j++] = mask;
1222 /* The inverse of wi_unpack. IN_LEN is the number of input
1223 blocks. The number of output blocks will be half this amount. */
1224 static void
1225 wi_pack (unsigned HOST_WIDE_INT *result,
1226 const unsigned HOST_HALF_WIDE_INT *input,
1227 unsigned int in_len)
1229 unsigned int i = 0;
1230 unsigned int j = 0;
1232 while (i + 2 < in_len)
1234 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1235 | ((unsigned HOST_WIDE_INT)input[i + 1]
1236 << HOST_BITS_PER_HALF_WIDE_INT);
1237 i += 2;
1240 /* Handle the case where in_len is odd. For this we zero extend. */
1241 if (in_len & 1)
1242 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1243 else
1244 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1245 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
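/* As an example of the half-wide representation used by these helpers,
   assuming 64-bit HOST_WIDE_INTs and 32-bit halves: unpacking the single
   block 0x0000000100000002 (precision 64) yields the half blocks
   { 0x00000002, 0x00000001 }, least significant half first, and wi_pack
   reassembles those two halves into the original 64-bit block.  */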
1248 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1249 result is returned.
1251 If HIGH is not set, throw away the upper half after the check is
1252 made to see if it overflows. Unfortunately there is no better way
1253 to check for overflow than to do this. If OVERFLOW is nonnull,
1254 record in *OVERFLOW whether the result overflowed. SGN controls
1255 the signedness and is used to check overflow or if HIGH is set. */
1256 unsigned int
1257 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1258 unsigned int op1len, const HOST_WIDE_INT *op2val,
1259 unsigned int op2len, unsigned int prec, signop sgn,
1260 bool *overflow, bool high)
1262 unsigned HOST_WIDE_INT o0, o1, k, t;
1263 unsigned int i;
1264 unsigned int j;
1265 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1266 unsigned int half_blocks_needed = blocks_needed * 2;
1267 /* The sizes here are scaled to support a 2x largest mode by 2x
1268 largest mode yielding a 4x largest mode result. This is what is
1269 needed by vpn. */
1271 unsigned HOST_HALF_WIDE_INT
1272 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1273 unsigned HOST_HALF_WIDE_INT
1274 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1275 /* The '2' in 'R' is because we are internally doing a full
1276 multiply. */
1277 unsigned HOST_HALF_WIDE_INT
1278 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1279 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1281 /* If the top level routine did not really pass in an overflow, then
1282 just make sure that we never attempt to set it. */
1283 bool needs_overflow = (overflow != 0);
1284 if (needs_overflow)
1285 *overflow = false;
1287 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1288 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1290 /* This is a surprisingly common case, so do it first. */
1291 if (op1 == 0 || op2 == 0)
1293 val[0] = 0;
1294 return 1;
1297 #ifdef umul_ppmm
1298 if (sgn == UNSIGNED)
1300 /* If the inputs are single HWIs and the output has room for at
1301 least two HWIs, we can use umul_ppmm directly. */
1302 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1303 && wi::fits_uhwi_p (op1)
1304 && wi::fits_uhwi_p (op2))
1306 /* This case never overflows. */
1307 if (high)
1309 val[0] = 0;
1310 return 1;
1312 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1313 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1315 val[2] = 0;
1316 return 3;
1318 return 1 + (val[1] != 0 || val[0] < 0);
1320 /* Likewise if the output is a full single HWI, except that the
1321 upper HWI of the result is only used for determining overflow.
1322 (We handle this case inline when overflow isn't needed.) */
1323 else if (prec == HOST_BITS_PER_WIDE_INT)
1325 unsigned HOST_WIDE_INT upper;
1326 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1327 if (needs_overflow)
1328 *overflow = (upper != 0);
1329 if (high)
1330 val[0] = upper;
1331 return 1;
1334 #endif
1336 /* Handle multiplications by 1. */
1337 if (op1 == 1)
1339 if (high)
1341 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1342 return 1;
1344 for (i = 0; i < op2len; i++)
1345 val[i] = op2val[i];
1346 return op2len;
1348 if (op2 == 1)
1350 if (high)
1352 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1353 return 1;
1355 for (i = 0; i < op1len; i++)
1356 val[i] = op1val[i];
1357 return op1len;
1360 /* If we need to check for overflow, we can only do half wide
1361 multiplies quickly because we need to look at the top bits to
1362 check for the overflow. */
1363 if ((high || needs_overflow)
1364 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1366 unsigned HOST_WIDE_INT r;
1368 if (sgn == SIGNED)
1370 o0 = op1.to_shwi ();
1371 o1 = op2.to_shwi ();
1373 else
1375 o0 = op1.to_uhwi ();
1376 o1 = op2.to_uhwi ();
1379 r = o0 * o1;
1380 if (needs_overflow)
1382 if (sgn == SIGNED)
1384 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1385 *overflow = true;
1387 else
1389 if ((r >> prec) != 0)
1390 *overflow = true;
1393 val[0] = high ? r >> prec : r;
1394 return 1;
1397 /* We do unsigned mul and then correct it. */
1398 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1399 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1401 /* The 2 is for a full mult. */
1402 memset (r, 0, half_blocks_needed * 2
1403 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1405 for (j = 0; j < half_blocks_needed; j++)
1407 k = 0;
1408 for (i = 0; i < half_blocks_needed; i++)
1410 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1411 + r[i + j] + k);
1412 r[i + j] = t & HALF_INT_MASK;
1413 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1415 r[j + half_blocks_needed] = k;
1418 /* We did unsigned math above. For signed we must adjust the
1419 product (assuming we need to see that). */
1420 if (sgn == SIGNED && (high || needs_overflow))
1422 unsigned HOST_WIDE_INT b;
1423 if (wi::neg_p (op1))
1425 b = 0;
1426 for (i = 0; i < half_blocks_needed; i++)
1428 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1429 - (unsigned HOST_WIDE_INT)v[i] - b;
1430 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1431 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1434 if (wi::neg_p (op2))
1436 b = 0;
1437 for (i = 0; i < half_blocks_needed; i++)
1439 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1440 - (unsigned HOST_WIDE_INT)u[i] - b;
1441 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1442 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1447 if (needs_overflow)
1449 HOST_WIDE_INT top;
1451 /* For unsigned, overflow is true if any of the top bits are set.
1452 For signed, overflow is true if any of the top bits are not equal
1453 to the sign bit. */
1454 if (sgn == UNSIGNED)
1455 top = 0;
1456 else
1458 top = r[(half_blocks_needed) - 1];
1459 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1460 top &= mask;
1463 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1464 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1465 *overflow = true;
1468 if (high)
1470 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1471 wi_pack ((unsigned HOST_WIDE_INT *) val,
1472 &r[half_blocks_needed], half_blocks_needed);
1473 return canonize (val, blocks_needed, prec);
1475 else
1477 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1478 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1479 return canonize (val, blocks_needed, prec);
1483 /* Compute the population count of X. */
1484 int
1485 wi::popcount (const wide_int_ref &x)
1487 unsigned int i;
1488 int count;
1490 /* The high order block is special if it is the last block and the
1491 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1492 have to clear out any ones above the precision before doing
1493 popcount on this block. */
1494 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1495 unsigned int stop = x.len;
1496 if (count < 0)
1498 count = popcount_hwi (x.uhigh () << -count);
1499 stop -= 1;
1501 else
1503 if (x.sign_mask () >= 0)
1504 count = 0;
1507 for (i = 0; i < stop; ++i)
1508 count += popcount_hwi (x.val[i]);
1510 return count;
1513 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1514 whether the result overflows when OP0 and OP1 are treated as having
1515 signedness SGN. Return the number of blocks in VAL. */
1516 unsigned int
1517 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1518 unsigned int op0len, const HOST_WIDE_INT *op1,
1519 unsigned int op1len, unsigned int prec,
1520 signop sgn, bool *overflow)
1522 unsigned HOST_WIDE_INT o0 = 0;
1523 unsigned HOST_WIDE_INT o1 = 0;
1524 unsigned HOST_WIDE_INT x = 0;
1525 /* We implement subtraction directly rather than as a negate and add:
1526 each element of OP1 is subtracted from the corresponding element of
1527 OP0, propagating a borrow from one element to the next. */
1528 unsigned HOST_WIDE_INT borrow = 0;
1529 unsigned HOST_WIDE_INT old_borrow = 0;
1531 unsigned HOST_WIDE_INT mask0, mask1;
1532 unsigned int i;
1534 unsigned int len = MAX (op0len, op1len);
1535 mask0 = -top_bit_of (op0, op0len, prec);
1536 mask1 = -top_bit_of (op1, op1len, prec);
1538 /* Subtract all of the explicitly defined elements. */
1539 for (i = 0; i < len; i++)
1541 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1542 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1543 x = o0 - o1 - borrow;
1544 val[i] = x;
1545 old_borrow = borrow;
1546 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1549 if (len * HOST_BITS_PER_WIDE_INT < prec)
1551 val[len] = mask0 - mask1 - borrow;
1552 len++;
1553 if (overflow)
1554 *overflow = false;
1556 else if (overflow)
1558 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1559 if (sgn == SIGNED)
1561 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1562 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1564 else
1566 /* Put the MSBs of X and O0 in the top of the HWI. */
1567 x <<= shift;
1568 o0 <<= shift;
1569 if (old_borrow)
1570 *overflow = (x >= o0);
1571 else
1572 *overflow = (x > o0);
1576 return canonize (val, len, prec);
1581 * Division and Mod
1584 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1585 algorithm is a small modification of the algorithm in Hacker's
1586 Delight by Warren, which itself is a small modification of Knuth's
1587 algorithm. M is the number of significant elements of B_DIVIDEND
1588 (U in Knuth's notation), although at least one extra element of
1589 B_DIVIDEND must be allocated. N is the number of elements of B_DIVISOR. */
1590 static void
1591 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1592 unsigned HOST_HALF_WIDE_INT *b_remainder,
1593 unsigned HOST_HALF_WIDE_INT *b_dividend,
1594 unsigned HOST_HALF_WIDE_INT *b_divisor,
1595 int m, int n)
1597 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1598 HOST_WIDE_INT and stored in the lower bits of each word. This
1599 algorithm should work properly on both 32 and 64 bit
1600 machines. */
1601 unsigned HOST_WIDE_INT b
1602 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1603 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1604 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1605 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1606 HOST_WIDE_INT t, k;
1607 int i, j, s;
1609 /* Single digit divisor. */
1610 if (n == 1)
1612 k = 0;
1613 for (j = m - 1; j >= 0; j--)
1615 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1616 k = ((k * b + b_dividend[j])
1617 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1618 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1620 b_remainder[0] = k;
1621 return;
1624 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1626 if (s)
1628 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1629 algorithm, we can overwrite b_dividend and b_divisor, so we do
1630 that. */
1631 for (i = n - 1; i > 0; i--)
1632 b_divisor[i] = (b_divisor[i] << s)
1633 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1634 b_divisor[0] = b_divisor[0] << s;
1636 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1637 for (i = m - 1; i > 0; i--)
1638 b_dividend[i] = (b_dividend[i] << s)
1639 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1640 b_dividend[0] = b_dividend[0] << s;
1643 /* Main loop. */
1644 for (j = m - n; j >= 0; j--)
1646 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1647 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1648 again:
1649 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1651 qhat -= 1;
1652 rhat += b_divisor[n-1];
1653 if (rhat < b)
1654 goto again;
1657 /* Multiply and subtract. */
1658 k = 0;
1659 for (i = 0; i < n; i++)
1661 p = qhat * b_divisor[i];
1662 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1663 b_dividend[i + j] = t;
1664 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1665 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1667 t = b_dividend[j+n] - k;
1668 b_dividend[j+n] = t;
1670 b_quotient[j] = qhat;
1671 if (t < 0)
1673 b_quotient[j] -= 1;
1674 k = 0;
1675 for (i = 0; i < n; i++)
1677 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1678 b_dividend[i+j] = t;
1679 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1681 b_dividend[j+n] += k;
1684 if (s)
1685 for (i = 0; i < n; i++)
1686 b_remainder[i] = (b_dividend[i] >> s)
1687 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1688 else
1689 for (i = 0; i < n; i++)
1690 b_remainder[i] = b_dividend[i];
1694 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1695 the result. If QUOTIENT is nonnull, store the value of the quotient
1696 there and return the number of blocks in it. The return value is
1697 not defined otherwise. If REMAINDER is nonnull, store the value
1698 of the remainder there and store the number of blocks in
1699 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1700 the division overflowed. */
1701 unsigned int
1702 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1703 HOST_WIDE_INT *remainder,
1704 const HOST_WIDE_INT *dividend_val,
1705 unsigned int dividend_len, unsigned int dividend_prec,
1706 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1707 unsigned int divisor_prec, signop sgn,
1708 bool *oflow)
1710 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1711 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1712 unsigned HOST_HALF_WIDE_INT
1713 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714 unsigned HOST_HALF_WIDE_INT
1715 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1716 unsigned HOST_HALF_WIDE_INT
1717 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1718 unsigned HOST_HALF_WIDE_INT
1719 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1720 unsigned int m, n;
1721 bool dividend_neg = false;
1722 bool divisor_neg = false;
1723 bool overflow = false;
1724 wide_int neg_dividend, neg_divisor;
1726 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1727 dividend_prec);
1728 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1729 divisor_prec);
1730 if (divisor == 0)
1731 overflow = true;
1733 /* The smallest signed number / -1 causes overflow. The dividend_len
1734 check is for speed rather than correctness. */
1735 if (sgn == SIGNED
1736 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1737 && divisor == -1
1738 && wi::only_sign_bit_p (dividend))
1739 overflow = true;
1741 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1742 (signed min / -1) has the same representation as the original dividend.
1743 We have traditionally made division by zero act as division by one,
1744 so there too we use the original dividend. */
1745 if (overflow)
1747 if (remainder)
1749 *remainder_len = 1;
1750 remainder[0] = 0;
1752 if (oflow != 0)
1753 *oflow = true;
1754 if (quotient)
1755 for (unsigned int i = 0; i < dividend_len; ++i)
1756 quotient[i] = dividend_val[i];
1757 return dividend_len;
1760 if (oflow)
1761 *oflow = false;
1763 /* Do it on the host if you can. */
1764 if (sgn == SIGNED
1765 && wi::fits_shwi_p (dividend)
1766 && wi::fits_shwi_p (divisor))
1768 HOST_WIDE_INT o0 = dividend.to_shwi ();
1769 HOST_WIDE_INT o1 = divisor.to_shwi ();
1771 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1773 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1774 if (quotient)
1776 quotient[0] = HOST_WIDE_INT_MIN;
1777 quotient[1] = 0;
1779 if (remainder)
1781 remainder[0] = 0;
1782 *remainder_len = 1;
1784 return 2;
1786 else
1788 if (quotient)
1789 quotient[0] = o0 / o1;
1790 if (remainder)
1792 remainder[0] = o0 % o1;
1793 *remainder_len = 1;
1795 return 1;
1799 if (sgn == UNSIGNED
1800 && wi::fits_uhwi_p (dividend)
1801 && wi::fits_uhwi_p (divisor))
1803 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1804 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1806 if (quotient)
1807 quotient[0] = o0 / o1;
1808 if (remainder)
1810 remainder[0] = o0 % o1;
1811 *remainder_len = 1;
1813 return 1;
1816 /* Make the divisor and dividend positive and remember what we
1817 did. */
1818 if (sgn == SIGNED)
1820 if (wi::neg_p (dividend))
1822 neg_dividend = -dividend;
1823 dividend = neg_dividend;
1824 dividend_neg = true;
1826 if (wi::neg_p (divisor))
1828 neg_divisor = -divisor;
1829 divisor = neg_divisor;
1830 divisor_neg = true;
1834 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1835 dividend_blocks_needed, dividend_prec, sgn);
1836 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1837 divisor_blocks_needed, divisor_prec, sgn);
1839 m = dividend_blocks_needed;
1840 b_dividend[m] = 0;
1841 while (m > 1 && b_dividend[m - 1] == 0)
1842 m--;
1844 n = divisor_blocks_needed;
1845 while (n > 1 && b_divisor[n - 1] == 0)
1846 n--;
1848 memset (b_quotient, 0, sizeof (b_quotient));
1850 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1852 unsigned int quotient_len = 0;
1853 if (quotient)
1855 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1856 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1857 /* The quotient is neg if exactly one of the divisor or dividend is
1858 neg. */
1859 if (dividend_neg != divisor_neg)
1860 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1861 quotient_len, dividend_prec,
1862 UNSIGNED, 0);
1865 if (remainder)
1867 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1868 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1869 /* The remainder is always the same sign as the dividend. */
1870 if (dividend_neg)
1871 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1872 *remainder_len, dividend_prec,
1873 UNSIGNED, 0);
1876 return quotient_len;
1880 * Shifting, rotating and extraction.
1883 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1884 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1885 unsigned int
1886 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1887 unsigned int xlen, unsigned int precision,
1888 unsigned int shift)
1890 /* Split the shift into a whole-block shift and a subblock shift. */
1891 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1892 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1894 /* The whole-block shift fills with zeros. */
1895 unsigned int len = BLOCKS_NEEDED (precision);
1896 for (unsigned int i = 0; i < skip; ++i)
1897 val[i] = 0;
1899 /* It's easier to handle the simple block case specially. */
1900 if (small_shift == 0)
1901 for (unsigned int i = skip; i < len; ++i)
1902 val[i] = safe_uhwi (xval, xlen, i - skip);
1903 else
1905 /* The first unfilled output block is a left shift of the first
1906 block in XVAL. The other output blocks contain bits from two
1907 consecutive input blocks. */
1908 unsigned HOST_WIDE_INT carry = 0;
1909 for (unsigned int i = skip; i < len; ++i)
1911 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1912 val[i] = (x << small_shift) | carry;
1913 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1916 return canonize (val, len, precision);
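/* For example, shifting the value 1 left by 68 at 128-bit precision splits
   into a whole-block shift of one block (skip == 1) and a subblock shift of
   4 bits, giving the blocks { 0, 0x10 }, i.e. the value 2^68.  */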
1919 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1920 number of blocks in VAL. The input has XPRECISION bits and the
1921 output has XPRECISION - SHIFT bits. */
1922 static unsigned int
1923 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1924 unsigned int xlen, unsigned int xprecision,
1925 unsigned int shift)
1927 /* Split the shift into a whole-block shift and a subblock shift. */
1928 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1929 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1931 /* Work out how many blocks are needed to store the significant bits
1932 (excluding the upper zeros or signs). */
1933 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1935 /* It's easier to handle the simple block case specially. */
1936 if (small_shift == 0)
1937 for (unsigned int i = 0; i < len; ++i)
1938 val[i] = safe_uhwi (xval, xlen, i + skip);
1939 else
1941 /* Each output block but the last is a combination of two input blocks.
1942 The last block is a right shift of the last block in XVAL. */
1943 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1944 for (unsigned int i = 0; i < len; ++i)
1946 val[i] = curr >> small_shift;
1947 curr = safe_uhwi (xval, xlen, i + skip + 1);
1948 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1951 return len;
1954 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1955 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1956 VAL has PRECISION bits. */
1957 unsigned int
1958 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1959 unsigned int xlen, unsigned int xprecision,
1960 unsigned int precision, unsigned int shift)
1962 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1964 /* The value we just created has precision XPRECISION - SHIFT.
1965 Zero-extend it to wider precisions. */
1966 if (precision > xprecision - shift)
1968 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1969 if (small_prec)
1970 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1971 else if (val[len - 1] < 0)
1973 /* Add a new block with a zero. */
1974 val[len++] = 0;
1975 return len;
1978 return canonize (val, len, precision);
1981 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1982 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1983 VAL has PRECISION bits. */
1984 unsigned int
1985 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1986 unsigned int xlen, unsigned int xprecision,
1987 unsigned int precision, unsigned int shift)
1989 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1991 /* The value we just created has precision XPRECISION - SHIFT.
1992 Sign-extend it to wider types. */
1993 if (precision > xprecision - shift)
1995 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1996 if (small_prec)
1997 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1999 return canonize (val, len, precision);
2002 /* Return the number of leading (upper) zeros in X. */
2003 int
2004 wi::clz (const wide_int_ref &x)
2006 /* Calculate how many bits there are above the highest represented block. */
2007 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2009 unsigned HOST_WIDE_INT high = x.uhigh ();
2010 if (count < 0)
2011 /* The upper -COUNT bits of HIGH are not part of the value.
2012 Clear them out. */
2013 high = (high << -count) >> -count;
2014 else if (x.sign_mask () < 0)
2015 /* The upper bit is set, so there are no leading zeros. */
2016 return 0;
2018 /* We don't need to look below HIGH. Either HIGH is nonzero,
2019 or the top bit of the block below is nonzero; clz_hwi is
2020 HOST_BITS_PER_WIDE_INT in the latter case. */
2021 return count + clz_hwi (high);
2024 /* Return the number of redundant sign bits in X. (That is, the number
2025 of bits immediately below the sign bit that have the same value as
2026 the sign bit.) */
2027 int
2028 wi::clrsb (const wide_int_ref &x)
2030 /* Calculate how many bits there are above the highest represented block. */
2031 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2033 unsigned HOST_WIDE_INT high = x.uhigh ();
2034 unsigned HOST_WIDE_INT mask = -1;
2035 if (count < 0)
2037 /* The upper -COUNT bits of HIGH are not part of the value.
2038 Clear them from both MASK and HIGH. */
2039 mask >>= -count;
2040 high &= mask;
2043 /* If the top bit is 1, count the number of leading 1s. If the top
2044 bit is zero, count the number of leading zeros. */
2045 if (high > mask / 2)
2046 high ^= mask;
2048 /* There are no sign bits below the top block, so we don't need to look
2049 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2050 HIGH is 0. */
2051 return count + clz_hwi (high) - 1;
2054 /* Return the number of trailing (lower) zeros in X. */
2055 int
2056 wi::ctz (const wide_int_ref &x)
2058 if (x.len == 1 && x.ulow () == 0)
2059 return x.precision;
2061 /* Having dealt with the zero case, there must be a block with a
2062 nonzero bit. We don't care about the bits above the first 1. */
2063 unsigned int i = 0;
2064 while (x.val[i] == 0)
2065 ++i;
2066 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2069 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2070 return -1. */
2071 int
2072 wi::exact_log2 (const wide_int_ref &x)
2074 /* Reject cases where there are implicit -1 blocks above HIGH. */
2075 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2076 return -1;
2078 /* Set CRUX to the index of the entry that should be nonzero.
2079 If the top block is zero then the next lowest block (if any)
2080 must have the high bit set. */
2081 unsigned int crux = x.len - 1;
2082 if (crux > 0 && x.val[crux] == 0)
2083 crux -= 1;
2085 /* Check that all lower blocks are zero. */
2086 for (unsigned int i = 0; i < crux; ++i)
2087 if (x.val[i] != 0)
2088 return -1;
2090 /* Get a zero-extended form of block CRUX. */
2091 unsigned HOST_WIDE_INT hwi = x.val[crux];
2092 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2093 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2095 /* Now it's down to whether HWI is a power of 2. */
2096 int res = ::exact_log2 (hwi);
2097 if (res >= 0)
2098 res += crux * HOST_BITS_PER_WIDE_INT;
2099 return res;
2102 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2103 int
2104 wi::floor_log2 (const wide_int_ref &x)
2106 return x.precision - 1 - clz (x);
2109 /* Return the index of the first (lowest) set bit in X, counting from 1.
2110 Return 0 if X is 0. */
2111 int
2112 wi::ffs (const wide_int_ref &x)
2114 return eq_p (x, 0) ? 0 : ctz (x) + 1;
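/* A worked example for the count and scan routines in this file: for the
   32-bit value 0x000000f0,

     clz        == 24    clrsb      == 23
     ctz        == 4     popcount   == 4
     floor_log2 == 7     exact_log2 == -1 (not a power of 2)
     ffs        == 5

   and for the value 0, clz and ctz both equal the precision (32), while
   ffs is 0 and floor_log2 is -1.  */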
2117 /* Return true if sign-extending X to have precision PRECISION would give
2118 the minimum signed value at that precision. */
2119 bool
2120 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2122 return ctz (x) + 1 == int (precision);
2125 /* Return true if X represents the minimum signed value. */
2126 bool
2127 wi::only_sign_bit_p (const wide_int_ref &x)
2129 return only_sign_bit_p (x, x.precision);
2133 * Private utilities.
2136 void gt_ggc_mx (widest_int *) { }
2137 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2138 void gt_pch_nx (widest_int *) { }
2140 template void wide_int::dump () const;
2141 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2142 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2143 template void offset_int::dump () const;
2144 template void widest_int::dump () const;