[official-gcc.git] / gcc / wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "vec.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "inchash.h"
30 #include "tree.h"
31 #include "dumpfile.h"
34 #define HOST_BITS_PER_HALF_WIDE_INT 32
35 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
36 # define HOST_HALF_WIDE_INT long
37 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
38 # define HOST_HALF_WIDE_INT int
39 #else
40 #error Please add support for HOST_HALF_WIDE_INT
41 #endif
43 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
44 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
45 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
46 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
47 typedef unsigned HOST_WIDE_INT UWtype;
48 typedef unsigned int UQItype __attribute__ ((mode (QI)));
49 typedef unsigned int USItype __attribute__ ((mode (SI)));
50 typedef unsigned int UDItype __attribute__ ((mode (DI)));
51 #if W_TYPE_SIZE == 32
52 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
53 #else
54 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
55 #endif
56 #include "longlong.h"
57 #endif
59 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
62 * Internal utilities.
65 /* Quantities to deal with values that hold half of a wide int. Used
66 in multiply and divide. */
67 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
69 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
70 #define BLOCKS_NEEDED(PREC) \
71 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
72 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
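/* A worked example of the macros above (illustrative only, assuming a
   64-bit HOST_WIDE_INT): BLOCKS_NEEDED (1) == 1, BLOCKS_NEEDED (64) == 1,
   BLOCKS_NEEDED (65) == 2 and BLOCKS_NEEDED (0) == 1 (a degenerate
   precision still gets one block).  SIGN_MASK (0x7fff...) is 0 because
   the top bit is clear, while SIGN_MASK (0x8000...) is -1.  */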
74 /* Return the value of VAL[I] if I < LEN; otherwise return 0 or -1
75 based on the top existing bit of VAL. */
77 static unsigned HOST_WIDE_INT
78 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
80 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
83 /* Convert the integer in VAL to canonical form, returning its new length.
84 LEN is the number of blocks currently in VAL and PRECISION is the number
85 of bits in the integer it represents.
87 This function only changes the representation, not the value. */
88 static unsigned int
89 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
91 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
92 HOST_WIDE_INT top;
93 int i;
95 if (len > blocks_needed)
96 len = blocks_needed;
98 if (len == 1)
99 return len;
101 top = val[len - 1];
102 if (len * HOST_BITS_PER_WIDE_INT > precision)
103 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
104 if (top != 0 && top != (HOST_WIDE_INT)-1)
105 return len;
107 /* At this point we know that the top is either 0 or -1. Find the
108 first block that is not a copy of this. */
109 for (i = len - 2; i >= 0; i--)
111 HOST_WIDE_INT x = val[i];
112 if (x != top)
114 if (SIGN_MASK (x) == top)
115 return i + 1;
117 /* We need an extra block because the top bit of block i does
118 not match the extension. */
119 return i + 2;
123 /* The number is 0 or -1. */
124 return 1;
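/* A worked example of the canonical form (illustrative only, assuming a
   64-bit HOST_WIDE_INT and precision 128): the value 1 is stored as the
   single block {1}, not {1, 0}; -1 is stored as {-1}, not {-1, -1}; but
   2^64 - 1, which is positive at precision 128, needs the two blocks
   {-1, 0}, because dropping the zero block would make the top bit of the
   remaining block look like a sign extension.  canonize only changes the
   representation, never the value.  */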
128 * Conversion routines in and out of wide_int.
131 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
132 result for an integer with precision PRECISION. Return the length
133 of VAL (after any canonization). */
134 unsigned int
135 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
136 unsigned int xlen, unsigned int precision, bool need_canon)
138 for (unsigned i = 0; i < xlen; i++)
139 val[i] = xval[i];
140 return need_canon ? canonize (val, xlen, precision) : xlen;
143 /* Construct a wide int from a buffer of length LEN. BUFFER will be
144 read according to the byte endianness and word endianness of the target.
145 Only the lower BUFFER_LEN bytes of the result are set; the remaining
146 high bytes are cleared. */
147 wide_int
148 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
150 unsigned int precision = buffer_len * BITS_PER_UNIT;
151 wide_int result = wide_int::create (precision);
152 unsigned int words = buffer_len / UNITS_PER_WORD;
154 /* We have to clear all the bits ourselves, as we merely OR in values
155 below. */
156 unsigned int len = BLOCKS_NEEDED (precision);
157 HOST_WIDE_INT *val = result.write_val ();
158 for (unsigned int i = 0; i < len; ++i)
159 val[i] = 0;
161 for (unsigned int byte = 0; byte < buffer_len; byte++)
163 unsigned int offset;
164 unsigned int index;
165 unsigned int bitpos = byte * BITS_PER_UNIT;
166 unsigned HOST_WIDE_INT value;
168 if (buffer_len > UNITS_PER_WORD)
170 unsigned int word = byte / UNITS_PER_WORD;
172 if (WORDS_BIG_ENDIAN)
173 word = (words - 1) - word;
175 offset = word * UNITS_PER_WORD;
177 if (BYTES_BIG_ENDIAN)
178 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
179 else
180 offset += byte % UNITS_PER_WORD;
182 else
183 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
185 value = (unsigned HOST_WIDE_INT) buffer[offset];
187 index = bitpos / HOST_BITS_PER_WIDE_INT;
188 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
191 result.set_len (canonize (val, len, precision));
193 return result;
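/* A sketch of the indexing above (illustrative only): on a target with
   little-endian bytes and words, and BUFFER_LEN == 3, buffer[0] is OR'd
   into bits 0..7 of val[0], buffer[1] into bits 8..15 and buffer[2] into
   bits 16..23, giving a 24-bit result.  On a big-endian target the
   OFFSET computation walks BUFFER from the other end so that the same
   numeric value is produced.  */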
196 /* Sets RESULT from X; the sign is taken according to SGN. */
197 void
198 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
200 int len = x.get_len ();
201 const HOST_WIDE_INT *v = x.get_val ();
202 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
204 if (wi::neg_p (x, sgn))
206 /* We use ones' complement to avoid the -0x80..0 edge case that
207 negation won't work on. */
208 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
209 for (int i = 0; i < len; i++)
210 t[i] = ~v[i];
211 if (excess > 0)
212 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
213 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
214 mpz_com (result, result);
216 else if (excess > 0)
218 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
219 for (int i = 0; i < len - 1; i++)
220 t[i] = v[i];
221 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
222 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
224 else
225 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
228 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
229 values of X will be wrapped; otherwise, they will be set to the
230 appropriate minimum or maximum TYPE bound. */
231 wide_int
232 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
234 size_t count, numb;
235 unsigned int prec = TYPE_PRECISION (type);
236 wide_int res = wide_int::create (prec);
238 if (!wrap)
240 mpz_t min, max;
242 mpz_init (min);
243 mpz_init (max);
244 get_type_static_bounds (type, min, max);
246 if (mpz_cmp (x, min) < 0)
247 mpz_set (x, min);
248 else if (mpz_cmp (x, max) > 0)
249 mpz_set (x, max);
251 mpz_clear (min);
252 mpz_clear (max);
255 /* Determine the number of unsigned HOST_WIDE_INTs that are required
256 for representing the value. The code to calculate count is
257 extracted from the GMP manual, section "Integer Import and Export":
258 http://gmplib.org/manual/Integer-Import-and-Export.html */
259 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
260 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
261 HOST_WIDE_INT *val = res.write_val ();
262 /* Write directly to the wide_int storage if possible, otherwise leave
263 GMP to allocate the memory for us. It might be slightly more efficient
264 to use mpz_tdiv_r_2exp for the latter case, but the situation is
265 pathological and it seems safer to operate on the original mpz value
266 in all cases. */
267 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
268 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
269 if (count < 1)
271 val[0] = 0;
272 count = 1;
274 count = MIN (count, BLOCKS_NEEDED (prec));
275 if (valres != val)
277 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
278 free (valres);
280 res.set_len (canonize (val, count, prec));
282 if (mpz_sgn (x) < 0)
283 res = -res;
285 return res;
289 * Largest and smallest values in a mode.
292 /* Return the largest SGNed number that is representable in PRECISION bits.
294 TODO: There is still code from the double_int era that tries to
295 make up for the fact that double int's could not represent the
296 min and max values of all types. This code should be removed
297 because the min and max values can always be represented in
298 wide_ints and int-csts. */
299 wide_int
300 wi::max_value (unsigned int precision, signop sgn)
302 gcc_checking_assert (precision != 0);
303 if (sgn == UNSIGNED)
304 /* The unsigned max is just all ones. */
305 return shwi (-1, precision);
306 else
307 /* The signed max is all ones except the top bit. This must be
308 explicitly represented. */
309 return mask (precision - 1, false, precision);
312 /* Return the smallest SGNed number that is representable in PRECISION bits. */
313 wide_int
314 wi::min_value (unsigned int precision, signop sgn)
316 gcc_checking_assert (precision != 0);
317 if (sgn == UNSIGNED)
318 return uhwi (0, precision);
319 else
320 /* The signed min is all zeros except the top bit. This must be
321 explicitly represented. */
322 return wi::set_bit_in_zero (precision - 1, precision);
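/* For example (illustrative only), at precision 8:
   max_value (8, UNSIGNED) is all ones, 0xff;
   max_value (8, SIGNED) is mask (7, false, 8), i.e. 0x7f;
   min_value (8, UNSIGNED) is 0; and
   min_value (8, SIGNED) is set_bit_in_zero (7, 8), i.e. -128.  */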
326 * Public utilities.
329 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
330 signedness SGN, to an integer that has PRECISION bits. Store the blocks
331 in VAL and return the number of blocks used.
333 This function can handle both extension (PRECISION > XPRECISION)
334 and truncation (PRECISION < XPRECISION). */
335 unsigned int
336 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
337 unsigned int xlen, unsigned int xprecision,
338 unsigned int precision, signop sgn)
340 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
341 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
342 for (unsigned i = 0; i < len; i++)
343 val[i] = xval[i];
345 if (precision > xprecision)
347 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
349 /* Expanding. */
350 if (sgn == UNSIGNED)
352 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
353 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
354 else if (val[len - 1] < 0)
356 while (len < BLOCKS_NEEDED (xprecision))
357 val[len++] = -1;
358 if (small_xprecision)
359 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
360 else
361 val[len++] = 0;
364 else
366 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
367 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
370 len = canonize (val, len, precision);
372 return len;
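/* Two sketches of the above (illustrative only, 64-bit HOST_WIDE_INT):
   extending the 8-bit value 0xff (stored as the single block {-1}) to 32
   bits gives zext_hwi (-1, 8) == 0xff when SGN is UNSIGNED, but leaves
   {-1}, i.e. -1, when SGN is SIGNED.  Truncation just drops any blocks
   beyond BLOCKS_NEEDED (PRECISION) and lets canonize tidy up the new top
   block.  */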
375 /* This function hides the fact that we cannot rely on the bits beyond
376 the precision. This issue comes up in the relational comparisons
377 where we do allow comparisons of values of different precisions. */
378 static inline HOST_WIDE_INT
379 selt (const HOST_WIDE_INT *a, unsigned int len,
380 unsigned int blocks_needed, unsigned int small_prec,
381 unsigned int index, signop sgn)
383 HOST_WIDE_INT val;
384 if (index < len)
385 val = a[index];
386 else if (index < blocks_needed || sgn == SIGNED)
387 /* Signed or within the precision. */
388 val = SIGN_MASK (a[len - 1]);
389 else
390 /* Unsigned extension beyond the precision. */
391 val = 0;
393 if (small_prec && index == blocks_needed - 1)
394 return (sgn == SIGNED
395 ? sext_hwi (val, small_prec)
396 : zext_hwi (val, small_prec));
397 else
398 return val;
401 /* Find the highest bit represented in a wide int. This will in
402 general have the same value as the sign bit. */
403 static inline HOST_WIDE_INT
404 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
406 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
407 unsigned HOST_WIDE_INT val = a[len - 1];
408 if (excess > 0)
409 val <<= excess;
410 return val >> (HOST_BITS_PER_WIDE_INT - 1);
414 * Comparisons; note that only equality is an operator. The other
415 * comparisons cannot be operators since they are inherently signed or
416 * unsigned and C++ has no such operators.
419 /* Return true if OP0 == OP1. */
420 bool
421 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
422 const HOST_WIDE_INT *op1, unsigned int op1len,
423 unsigned int prec)
425 int l0 = op0len - 1;
426 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
428 if (op0len != op1len)
429 return false;
431 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
433 /* It does not matter whether we zext or sext here; we just have to
434 treat both operands the same way. */
435 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
436 return false;
437 l0--;
440 while (l0 >= 0)
441 if (op0[l0] != op1[l0])
442 return false;
443 else
444 l0--;
446 return true;
449 /* Return true if OP0 < OP1 using signed comparisons. */
450 bool
451 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
452 unsigned int precision,
453 const HOST_WIDE_INT *op1, unsigned int op1len)
455 HOST_WIDE_INT s0, s1;
456 unsigned HOST_WIDE_INT u0, u1;
457 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
458 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
459 int l = MAX (op0len - 1, op1len - 1);
461 /* Only the top block is compared as signed. The rest are unsigned
462 comparisons. */
463 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
464 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
465 if (s0 < s1)
466 return true;
467 if (s0 > s1)
468 return false;
470 l--;
471 while (l >= 0)
473 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
474 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
476 if (u0 < u1)
477 return true;
478 if (u0 > u1)
479 return false;
480 l--;
483 return false;
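/* Why only the top block is compared as signed (a worked example,
   scaled down to 8-bit blocks for readability): for the 16-bit values
   0x0180 and 0x017f the top blocks are equal, so the decision falls to
   the low blocks 0x80 and 0x7f.  Comparing those as unsigned
   (0x80 > 0x7f) gives the right answer; comparing them as signed
   (-0x80 < 0x7f) would not.  */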
486 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
487 signed compares. */
488 int
489 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
490 unsigned int precision,
491 const HOST_WIDE_INT *op1, unsigned int op1len)
493 HOST_WIDE_INT s0, s1;
494 unsigned HOST_WIDE_INT u0, u1;
495 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
496 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
497 int l = MAX (op0len - 1, op1len - 1);
499 /* Only the top block is compared as signed. The rest are unsigned
500 comparisons. */
501 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
502 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
503 if (s0 < s1)
504 return -1;
505 if (s0 > s1)
506 return 1;
508 l--;
509 while (l >= 0)
511 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
512 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
514 if (u0 < u1)
515 return -1;
516 if (u0 > u1)
517 return 1;
518 l--;
521 return 0;
524 /* Return true if OP0 < OP1 using unsigned comparisons. */
525 bool
526 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
527 unsigned int precision,
528 const HOST_WIDE_INT *op1, unsigned int op1len)
530 unsigned HOST_WIDE_INT x0;
531 unsigned HOST_WIDE_INT x1;
532 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
533 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
534 int l = MAX (op0len - 1, op1len - 1);
536 while (l >= 0)
538 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
539 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
540 if (x0 < x1)
541 return true;
542 if (x0 > x1)
543 return false;
544 l--;
547 return false;
550 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
551 unsigned compares. */
552 int
553 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
554 unsigned int precision,
555 const HOST_WIDE_INT *op1, unsigned int op1len)
557 unsigned HOST_WIDE_INT x0;
558 unsigned HOST_WIDE_INT x1;
559 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
560 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
561 int l = MAX (op0len - 1, op1len - 1);
563 while (l >= 0)
565 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
566 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
567 if (x0 < x1)
568 return -1;
569 if (x0 > x1)
570 return 1;
571 l--;
574 return 0;
578 * Extension.
581 /* Sign-extend the number represented by XVAL and XLEN into VAL,
582 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
583 and VAL have PRECISION bits. */
584 unsigned int
585 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
586 unsigned int xlen, unsigned int precision, unsigned int offset)
588 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
589 /* Extending beyond the precision is a no-op. If we have only stored
590 OFFSET bits or fewer, the rest are already signs. */
591 if (offset >= precision || len >= xlen)
593 for (unsigned i = 0; i < xlen; ++i)
594 val[i] = xval[i];
595 return xlen;
597 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
598 for (unsigned int i = 0; i < len; i++)
599 val[i] = xval[i];
600 if (suboffset > 0)
602 val[len] = sext_hwi (xval[len], suboffset);
603 len += 1;
605 return canonize (val, len, precision);
608 /* Zero-extend the number represented by XVAL and XLEN into VAL,
609 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
610 and VAL have PRECISION bits. */
611 unsigned int
612 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
613 unsigned int xlen, unsigned int precision, unsigned int offset)
615 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
616 /* Extending beyond the precision is a no-op. If we have only stored
617 OFFSET bits or fewer, and the upper stored bit is zero, then there
618 is nothing to do. */
619 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
621 for (unsigned i = 0; i < xlen; ++i)
622 val[i] = xval[i];
623 return xlen;
625 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 for (unsigned int i = 0; i < len; i++)
627 val[i] = i < xlen ? xval[i] : -1;
628 if (suboffset > 0)
629 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
630 else
631 val[len] = 0;
632 return canonize (val, len + 1, precision);
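/* Sketch (illustrative only, assuming a 64-bit HOST_WIDE_INT and
   precision 128): with OFFSET == 8, sext_large keeps bits 0..7 of the
   low block and replicates bit 7 through every higher bit, while
   zext_large keeps bits 0..7 and forces every higher bit to zero; both
   then rely on canonize to choose the shortest representation.  */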
636 * Masking, inserting, shifting, rotating.
639 /* Insert WIDTH bits from Y into X starting at START. */
640 wide_int
641 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
642 unsigned int width)
644 wide_int result;
645 wide_int mask;
646 wide_int tmp;
648 unsigned int precision = x.get_precision ();
649 if (start >= precision)
650 return x;
652 gcc_checking_assert (precision >= width);
654 if (start + width >= precision)
655 width = precision - start;
657 mask = wi::shifted_mask (start, width, false, precision);
658 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
659 result = tmp & mask;
661 tmp = wi::bit_and_not (x, mask);
662 result = result | tmp;
664 return result;
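/* For example (illustrative only), inserting the 4-bit value 0xa into
   a 16-bit X == 0x1234 at START == 8 builds the mask 0x0f00, shifts Y
   up to 0x0a00, and combines (0x1234 & ~0x0f00) | 0x0a00 == 0x1a34.  */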
667 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
668 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
669 bits. */
670 unsigned int
671 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
672 unsigned int xlen, unsigned int precision, unsigned int bit)
674 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
675 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
677 if (block + 1 >= xlen)
679 /* The operation either affects the last current block or needs
680 a new block. */
681 unsigned int len = block + 1;
682 for (unsigned int i = 0; i < len; i++)
683 val[i] = safe_uhwi (xval, xlen, i);
684 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
686 /* If the bit we just set is at the msb of the block, make sure
687 that any higher bits are zeros. */
688 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
689 val[len++] = 0;
690 return len;
692 else
694 for (unsigned int i = 0; i < xlen; i++)
695 val[i] = xval[i];
696 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
697 return canonize (val, xlen, precision);
701 /* bswap THIS. */
702 wide_int
703 wide_int_storage::bswap () const
705 wide_int result = wide_int::create (precision);
706 unsigned int i, s;
707 unsigned int len = BLOCKS_NEEDED (precision);
708 unsigned int xlen = get_len ();
709 const HOST_WIDE_INT *xval = get_val ();
710 HOST_WIDE_INT *val = result.write_val ();
712 /* This is not a well defined operation if the precision is not a
713 multiple of 8. */
714 gcc_assert ((precision & 0x7) == 0);
716 for (i = 0; i < len; i++)
717 val[i] = 0;
719 /* Only swap the bytes that are not the padding. */
720 for (s = 0; s < precision; s += 8)
722 unsigned int d = precision - s - 8;
723 unsigned HOST_WIDE_INT byte;
725 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
726 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
728 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
730 block = d / HOST_BITS_PER_WIDE_INT;
731 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
733 val[block] |= byte << offset;
736 result.set_len (canonize (val, len, precision));
737 return result;
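/* For example (illustrative only), bswap of the 32-bit value 0x11223344
   moves the byte at S == 0 (0x44) to destination bits 24..31, the byte
   at S == 8 (0x33) to bits 16..23, and so on, giving 0x44332211.
   Precisions that are not a multiple of 8 are rejected by the assert
   above.  */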
740 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
741 above that up to PREC are zeros. The result is inverted if NEGATE
742 is true. Return the number of blocks in VAL. */
743 unsigned int
744 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
745 unsigned int prec)
747 if (width >= prec)
749 val[0] = negate ? 0 : -1;
750 return 1;
752 else if (width == 0)
754 val[0] = negate ? -1 : 0;
755 return 1;
758 unsigned int i = 0;
759 while (i < width / HOST_BITS_PER_WIDE_INT)
760 val[i++] = negate ? 0 : -1;
762 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
763 if (shift != 0)
765 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
766 val[i++] = negate ? ~last : last;
768 else
769 val[i++] = negate ? -1 : 0;
771 return i;
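/* For example (illustrative only, 64-bit HOST_WIDE_INT):
   mask (val, 8, false, 32) writes val[0] = 0xff and returns 1;
   mask (val, 8, true, 32) writes val[0] = ~0xff and returns 1;
   mask (val, 64, false, 128) writes val[0] = -1, val[1] = 0 and
   returns 2, the explicit zero block keeping the value positive.  */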
774 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
775 bits are ones, and the bits above that up to PREC are zeros. The result
776 is inverted if NEGATE is true. Return the number of blocks in VAL. */
777 unsigned int
778 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
779 bool negate, unsigned int prec)
781 if (start >= prec || width == 0)
783 val[0] = negate ? -1 : 0;
784 return 1;
787 if (width > prec - start)
788 width = prec - start;
789 unsigned int end = start + width;
791 unsigned int i = 0;
792 while (i < start / HOST_BITS_PER_WIDE_INT)
793 val[i++] = negate ? -1 : 0;
795 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
796 if (shift)
798 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
799 shift += width;
800 if (shift < HOST_BITS_PER_WIDE_INT)
802 /* case 000111000 */
803 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
804 val[i++] = negate ? ~block : block;
805 return i;
807 else
808 /* ...111000 */
809 val[i++] = negate ? block : ~block;
812 while (i < end / HOST_BITS_PER_WIDE_INT)
813 /* 1111111 */
814 val[i++] = negate ? 0 : -1;
816 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
817 if (shift != 0)
819 /* 000011111 */
820 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
821 val[i++] = negate ? ~block : block;
823 else if (end < prec)
824 val[i++] = negate ? -1 : 0;
826 return i;
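/* For example (illustrative only, 64-bit HOST_WIDE_INT):
   shifted_mask (val, 8, 4, false, 32) takes the "case 000111000" path
   and produces the single block 0xf00, while
   shifted_mask (val, 60, 8, false, 128) straddles a block boundary and
   produces val[0] with only bits 60..63 set and val[1] = 0xf.  */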
830 * logical operations.
833 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
834 unsigned int
835 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
836 unsigned int op0len, const HOST_WIDE_INT *op1,
837 unsigned int op1len, unsigned int prec)
839 int l0 = op0len - 1;
840 int l1 = op1len - 1;
841 bool need_canon = true;
843 unsigned int len = MAX (op0len, op1len);
844 if (l0 > l1)
846 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
847 if (op1mask == 0)
849 l0 = l1;
850 len = l1 + 1;
852 else
854 need_canon = false;
855 while (l0 > l1)
857 val[l0] = op0[l0];
858 l0--;
862 else if (l1 > l0)
864 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
865 if (op0mask == 0)
866 len = l0 + 1;
867 else
869 need_canon = false;
870 while (l1 > l0)
872 val[l1] = op1[l1];
873 l1--;
878 while (l0 >= 0)
880 val[l0] = op0[l0] & op1[l0];
881 l0--;
884 if (need_canon)
885 len = canonize (val, len, prec);
887 return len;
890 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
891 unsigned int
892 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
893 unsigned int op0len, const HOST_WIDE_INT *op1,
894 unsigned int op1len, unsigned int prec)
896 wide_int result;
897 int l0 = op0len - 1;
898 int l1 = op1len - 1;
899 bool need_canon = true;
901 unsigned int len = MAX (op0len, op1len);
902 if (l0 > l1)
904 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
905 if (op1mask != 0)
907 l0 = l1;
908 len = l1 + 1;
910 else
912 need_canon = false;
913 while (l0 > l1)
915 val[l0] = op0[l0];
916 l0--;
920 else if (l1 > l0)
922 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
923 if (op0mask == 0)
924 len = l0 + 1;
925 else
927 need_canon = false;
928 while (l1 > l0)
930 val[l1] = ~op1[l1];
931 l1--;
936 while (l0 >= 0)
938 val[l0] = op0[l0] & ~op1[l0];
939 l0--;
942 if (need_canon)
943 len = canonize (val, len, prec);
945 return len;
948 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
949 unsigned int
950 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
951 unsigned int op0len, const HOST_WIDE_INT *op1,
952 unsigned int op1len, unsigned int prec)
954 wide_int result;
955 int l0 = op0len - 1;
956 int l1 = op1len - 1;
957 bool need_canon = true;
959 unsigned int len = MAX (op0len, op1len);
960 if (l0 > l1)
962 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
963 if (op1mask != 0)
965 l0 = l1;
966 len = l1 + 1;
968 else
970 need_canon = false;
971 while (l0 > l1)
973 val[l0] = op0[l0];
974 l0--;
978 else if (l1 > l0)
980 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
981 if (op0mask != 0)
982 len = l0 + 1;
983 else
985 need_canon = false;
986 while (l1 > l0)
988 val[l1] = op1[l1];
989 l1--;
994 while (l0 >= 0)
996 val[l0] = op0[l0] | op1[l0];
997 l0--;
1000 if (need_canon)
1001 len = canonize (val, len, prec);
1003 return len;
1006 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1007 unsigned int
1008 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1009 unsigned int op0len, const HOST_WIDE_INT *op1,
1010 unsigned int op1len, unsigned int prec)
1012 wide_int result;
1013 int l0 = op0len - 1;
1014 int l1 = op1len - 1;
1015 bool need_canon = true;
1017 unsigned int len = MAX (op0len, op1len);
1018 if (l0 > l1)
1020 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1021 if (op1mask == 0)
1023 l0 = l1;
1024 len = l1 + 1;
1026 else
1028 need_canon = false;
1029 while (l0 > l1)
1031 val[l0] = op0[l0];
1032 l0--;
1036 else if (l1 > l0)
1038 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1039 if (op0mask != 0)
1040 len = l0 + 1;
1041 else
1043 need_canon = false;
1044 while (l1 > l0)
1046 val[l1] = ~op1[l1];
1047 l1--;
1052 while (l0 >= 0)
1054 val[l0] = op0[l0] | ~op1[l0];
1055 l0--;
1058 if (need_canon)
1059 len = canonize (val, len, prec);
1061 return len;
1064 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1065 unsigned int
1066 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1067 unsigned int op0len, const HOST_WIDE_INT *op1,
1068 unsigned int op1len, unsigned int prec)
1070 wide_int result;
1071 int l0 = op0len - 1;
1072 int l1 = op1len - 1;
1074 unsigned int len = MAX (op0len, op1len);
1075 if (l0 > l1)
1077 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1078 while (l0 > l1)
1080 val[l0] = op0[l0] ^ op1mask;
1081 l0--;
1085 if (l1 > l0)
1087 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1088 while (l1 > l0)
1090 val[l1] = op0mask ^ op1[l1];
1091 l1--;
1095 while (l0 >= 0)
1097 val[l0] = op0[l0] ^ op1[l0];
1098 l0--;
1101 return canonize (val, len, prec);
1105 * math
1108 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1109 whether the result overflows when OP0 and OP1 are treated as having
1110 signedness SGN. Return the number of blocks in VAL. */
1111 unsigned int
1112 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1113 unsigned int op0len, const HOST_WIDE_INT *op1,
1114 unsigned int op1len, unsigned int prec,
1115 signop sgn, bool *overflow)
1117 unsigned HOST_WIDE_INT o0 = 0;
1118 unsigned HOST_WIDE_INT o1 = 0;
1119 unsigned HOST_WIDE_INT x = 0;
1120 unsigned HOST_WIDE_INT carry = 0;
1121 unsigned HOST_WIDE_INT old_carry = 0;
1122 unsigned HOST_WIDE_INT mask0, mask1;
1123 unsigned int i;
1125 unsigned int len = MAX (op0len, op1len);
1126 mask0 = -top_bit_of (op0, op0len, prec);
1127 mask1 = -top_bit_of (op1, op1len, prec);
1128 /* Add all of the explicitly defined elements. */
1130 for (i = 0; i < len; i++)
1132 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1133 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1134 x = o0 + o1 + carry;
1135 val[i] = x;
1136 old_carry = carry;
1137 carry = carry == 0 ? x < o0 : x <= o0;
1140 if (len * HOST_BITS_PER_WIDE_INT < prec)
1142 val[len] = mask0 + mask1 + carry;
1143 len++;
1144 if (overflow)
1145 *overflow = false;
1147 else if (overflow)
1149 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1150 if (sgn == SIGNED)
1152 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1153 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1155 else
1157 /* Put the MSB of X and O0 into the top of the HWI. */
1158 x <<= shift;
1159 o0 <<= shift;
1160 if (old_carry)
1161 *overflow = (x <= o0);
1162 else
1163 *overflow = (x < o0);
1167 return canonize (val, len, prec);
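/* The signed overflow test above is the usual sign-bit identity
   (a worked example, not a change in behaviour): after shifting the
   most significant valid bit into the HWI sign position, overflow
   happened iff the result's sign differs from the sign of both
   operands, i.e. (result ^ op0) & (result ^ op1) is negative.  At
   precision 8, 0x7f + 0x01 gives 0x80, and (0x80 ^ 0x7f) & (0x80 ^ 0x01)
   == 0x81, whose top bit is set, so *OVERFLOW is true.  */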
1170 /* Subroutines of the multiplication and division operations. Unpack
1171 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1172 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1173 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1174 static void
1175 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1176 unsigned int in_len, unsigned int out_len,
1177 unsigned int prec, signop sgn)
1179 unsigned int i;
1180 unsigned int j = 0;
1181 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1182 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1183 HOST_WIDE_INT mask;
1185 if (sgn == SIGNED)
1187 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1188 mask &= HALF_INT_MASK;
1190 else
1191 mask = 0;
1193 for (i = 0; i < blocks_needed - 1; i++)
1195 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1196 result[j++] = x;
1197 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1200 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1201 if (small_prec)
1203 if (sgn == SIGNED)
1204 x = sext_hwi (x, small_prec);
1205 else
1206 x = zext_hwi (x, small_prec);
1208 result[j++] = x;
1209 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1211 /* Smear the sign bit. */
1212 while (j < out_len)
1213 result[j++] = mask;
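/* Sketch (illustrative only, with a 64-bit HOST_WIDE_INT and therefore
   32-bit half blocks): unpacking the single block 0x0000000180000000 at
   precision 64 produces the half blocks {0x80000000, 0x00000001}; any
   remaining half blocks up to OUT_LEN are filled with 0, or with
   0xffffffff when SGN is SIGNED and the value is negative.  */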
1216 /* The inverse of wi_unpack. IN_LEN is the number of input
1217 blocks. The number of output blocks will be half this amount. */
1218 static void
1219 wi_pack (unsigned HOST_WIDE_INT *result,
1220 const unsigned HOST_HALF_WIDE_INT *input,
1221 unsigned int in_len)
1223 unsigned int i = 0;
1224 unsigned int j = 0;
1226 while (i + 2 < in_len)
1228 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1229 | ((unsigned HOST_WIDE_INT)input[i + 1]
1230 << HOST_BITS_PER_HALF_WIDE_INT);
1231 i += 2;
1234 /* Handle the case where in_len is odd. For this we zero extend. */
1235 if (in_len & 1)
1236 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1237 else
1238 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1239 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
1242 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1243 result is returned.
1245 If HIGH is not set, throw away the upper half after the check is
1246 made to see if it overflows. Unfortunately there is no better way
1247 to check for overflow than to do this. If OVERFLOW is nonnull,
1248 record in *OVERFLOW whether the result overflowed. SGN controls
1249 the signedness and is used to check overflow or if HIGH is set. */
1250 unsigned int
1251 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1252 unsigned int op1len, const HOST_WIDE_INT *op2val,
1253 unsigned int op2len, unsigned int prec, signop sgn,
1254 bool *overflow, bool high)
1256 unsigned HOST_WIDE_INT o0, o1, k, t;
1257 unsigned int i;
1258 unsigned int j;
1259 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1260 unsigned int half_blocks_needed = blocks_needed * 2;
1261 /* The sizes here are scaled to support a 2x largest mode by 2x
1262 largest mode yielding a 4x largest mode result. This is what is
1263 needed by vpn. */
1265 unsigned HOST_HALF_WIDE_INT
1266 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1267 unsigned HOST_HALF_WIDE_INT
1268 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1269 /* The '2' in 'R' is because we are internally doing a full
1270 multiply. */
1271 unsigned HOST_HALF_WIDE_INT
1272 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1273 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1275 /* If the top level routine did not really pass in an overflow, then
1276 just make sure that we never attempt to set it. */
1277 bool needs_overflow = (overflow != 0);
1278 if (needs_overflow)
1279 *overflow = false;
1281 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1282 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1284 /* This is a surprisingly common case, so do it first. */
1285 if (op1 == 0 || op2 == 0)
1287 val[0] = 0;
1288 return 1;
1291 #ifdef umul_ppmm
1292 if (sgn == UNSIGNED)
1294 /* If the inputs are single HWIs and the output has room for at
1295 least two HWIs, we can use umul_ppmm directly. */
1296 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1297 && wi::fits_uhwi_p (op1)
1298 && wi::fits_uhwi_p (op2))
1300 /* This case never overflows. */
1301 if (high)
1303 val[0] = 0;
1304 return 1;
1306 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1307 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1309 val[2] = 0;
1310 return 3;
1312 return 1 + (val[1] != 0 || val[0] < 0);
1314 /* Likewise if the output is a full single HWI, except that the
1315 upper HWI of the result is only used for determining overflow.
1316 (We handle this case inline when overflow isn't needed.) */
1317 else if (prec == HOST_BITS_PER_WIDE_INT)
1319 unsigned HOST_WIDE_INT upper;
1320 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1321 if (needs_overflow)
1322 *overflow = (upper != 0);
1323 if (high)
1324 val[0] = upper;
1325 return 1;
1328 #endif
1330 /* Handle multiplications by 1. */
1331 if (op1 == 1)
1333 if (high)
1335 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1336 return 1;
1338 for (i = 0; i < op2len; i++)
1339 val[i] = op2val[i];
1340 return op2len;
1342 if (op2 == 1)
1344 if (high)
1346 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1347 return 1;
1349 for (i = 0; i < op1len; i++)
1350 val[i] = op1val[i];
1351 return op1len;
1354 /* If we need to check for overflow, we can only do half wide
1355 multiplies quickly because we need to look at the top bits to
1356 check for the overflow. */
1357 if ((high || needs_overflow)
1358 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1360 unsigned HOST_WIDE_INT r;
1362 if (sgn == SIGNED)
1364 o0 = op1.to_shwi ();
1365 o1 = op2.to_shwi ();
1367 else
1369 o0 = op1.to_uhwi ();
1370 o1 = op2.to_uhwi ();
1373 r = o0 * o1;
1374 if (needs_overflow)
1376 if (sgn == SIGNED)
1378 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1379 *overflow = true;
1381 else
1383 if ((r >> prec) != 0)
1384 *overflow = true;
1387 val[0] = high ? r >> prec : r;
1388 return 1;
1391 /* We do unsigned mul and then correct it. */
1392 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1393 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1395 /* The 2 is for a full mult. */
1396 memset (r, 0, half_blocks_needed * 2
1397 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1399 for (j = 0; j < half_blocks_needed; j++)
1401 k = 0;
1402 for (i = 0; i < half_blocks_needed; i++)
1404 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1405 + r[i + j] + k);
1406 r[i + j] = t & HALF_INT_MASK;
1407 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1409 r[j + half_blocks_needed] = k;
1412 /* We did unsigned math above. For signed we must adjust the
1413 product (only needed when HIGH or overflow checking is requested). */
1414 if (sgn == SIGNED && (high || needs_overflow))
1416 unsigned HOST_WIDE_INT b;
1417 if (wi::neg_p (op1))
1419 b = 0;
1420 for (i = 0; i < half_blocks_needed; i++)
1422 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1423 - (unsigned HOST_WIDE_INT)v[i] - b;
1424 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1425 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1428 if (wi::neg_p (op2))
1430 b = 0;
1431 for (i = 0; i < half_blocks_needed; i++)
1433 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1434 - (unsigned HOST_WIDE_INT)u[i] - b;
1435 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1436 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1441 if (needs_overflow)
1443 HOST_WIDE_INT top;
1445 /* For unsigned, overflow is true if any of the top bits are set.
1446 For signed, overflow is true if any of the top bits are not equal
1447 to the sign bit. */
1448 if (sgn == UNSIGNED)
1449 top = 0;
1450 else
1452 top = r[(half_blocks_needed) - 1];
1453 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1454 top &= mask;
1457 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1458 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1459 *overflow = true;
1462 if (high)
1464 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1465 wi_pack ((unsigned HOST_WIDE_INT *) val,
1466 &r[half_blocks_needed], half_blocks_needed);
1467 return canonize (val, blocks_needed, prec);
1469 else
1471 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1472 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1473 return canonize (val, blocks_needed, prec);
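/* The main loop above is classic schoolbook multiplication in base
   2^(HOST_BITS_PER_HALF_WIDE_INT) (an explanatory note, assuming a
   64-bit HOST_WIDE_INT and hence 32-bit half blocks): each half block
   of U is multiplied by each half block of V; the 64-bit product plus
   the previously accumulated r[i + j] plus the carry K is split into a
   low half written back to r[i + j] and a high half that becomes the
   carry into the next column.  */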
1477 /* Compute the population count of X. */
1478 int
1479 wi::popcount (const wide_int_ref &x)
1481 unsigned int i;
1482 int count;
1484 /* The high order block is special if it is the last block and the
1485 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1486 have to clear out any ones above the precision before doing
1487 popcount on this block. */
1488 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1489 unsigned int stop = x.len;
1490 if (count < 0)
1492 count = popcount_hwi (x.uhigh () << -count);
1493 stop -= 1;
1495 else
1497 if (x.sign_mask () >= 0)
1498 count = 0;
1501 for (i = 0; i < stop; ++i)
1502 count += popcount_hwi (x.val[i]);
1504 return count;
1507 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1508 whether the result overflows when OP0 and OP1 are treated as having
1509 signedness SGN. Return the number of blocks in VAL. */
1510 unsigned int
1511 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1512 unsigned int op0len, const HOST_WIDE_INT *op1,
1513 unsigned int op1len, unsigned int prec,
1514 signop sgn, bool *overflow)
1516 unsigned HOST_WIDE_INT o0 = 0;
1517 unsigned HOST_WIDE_INT o1 = 0;
1518 unsigned HOST_WIDE_INT x = 0;
1519 /* We implement subtraction directly rather than as negate-and-add,
1520 propagating a borrow from the least significant element upwards
1521 (the borrow starts at 0). */
1522 unsigned HOST_WIDE_INT borrow = 0;
1523 unsigned HOST_WIDE_INT old_borrow = 0;
1525 unsigned HOST_WIDE_INT mask0, mask1;
1526 unsigned int i;
1528 unsigned int len = MAX (op0len, op1len);
1529 mask0 = -top_bit_of (op0, op0len, prec);
1530 mask1 = -top_bit_of (op1, op1len, prec);
1532 /* Subtract all of the explicitly defined elements. */
1533 for (i = 0; i < len; i++)
1535 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1536 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1537 x = o0 - o1 - borrow;
1538 val[i] = x;
1539 old_borrow = borrow;
1540 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1543 if (len * HOST_BITS_PER_WIDE_INT < prec)
1545 val[len] = mask0 - mask1 - borrow;
1546 len++;
1547 if (overflow)
1548 *overflow = false;
1550 else if (overflow)
1552 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1553 if (sgn == SIGNED)
1555 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1556 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1558 else
1560 /* Put the MSB of X and O0 into the top of the HWI. */
1561 x <<= shift;
1562 o0 <<= shift;
1563 if (old_borrow)
1564 *overflow = (x >= o0);
1565 else
1566 *overflow = (x > o0);
1570 return canonize (val, len, prec);
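/* As with addition, the signed overflow test is a sign-bit identity
   (a worked example using just the low 8 bits for readability):
   subtraction overflows iff the operands have different signs and the
   result's sign differs from the minuend's, hence
   (o0 ^ o1) & (result ^ o0) tested in the sign position.  At precision
   8, 0x80 - 0x01 gives 0x7f, and (0x80 ^ 0x01) & (0x7f ^ 0x80) == 0x81
   has its top bit set, so *OVERFLOW is true.  */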
1575 * Division and Mod
1578 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1579 algorithm is a small modification of the algorithm in Hacker's
1580 Delight by Warren, which itself is a small modification of Knuth's
1581 algorithm. M is the number of significant elements of B_DIVIDEND
1582 (Knuth's U); there needs to be at least one extra element of
1583 B_DIVIDEND allocated. N is the number of elements of B_DIVISOR. */
1584 static void
1585 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1586 unsigned HOST_HALF_WIDE_INT *b_remainder,
1587 unsigned HOST_HALF_WIDE_INT *b_dividend,
1588 unsigned HOST_HALF_WIDE_INT *b_divisor,
1589 int m, int n)
1591 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1592 HOST_WIDE_INT and stored in the lower bits of each word. This
1593 algorithm should work properly on both 32 and 64 bit
1594 machines. */
1595 unsigned HOST_WIDE_INT b
1596 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1597 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1598 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1599 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1600 HOST_WIDE_INT t, k;
1601 int i, j, s;
1603 /* Single digit divisor. */
1604 if (n == 1)
1606 k = 0;
1607 for (j = m - 1; j >= 0; j--)
1609 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1610 k = ((k * b + b_dividend[j])
1611 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1612 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1614 b_remainder[0] = k;
1615 return;
1618 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1620 if (s)
1622 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1623 algorithm, we can overwrite b_dividend and b_divisor, so we do
1624 that. */
1625 for (i = n - 1; i > 0; i--)
1626 b_divisor[i] = (b_divisor[i] << s)
1627 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1628 b_divisor[0] = b_divisor[0] << s;
1630 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1631 for (i = m - 1; i > 0; i--)
1632 b_dividend[i] = (b_dividend[i] << s)
1633 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1634 b_dividend[0] = b_dividend[0] << s;
1637 /* Main loop. */
1638 for (j = m - n; j >= 0; j--)
1640 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1641 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1642 again:
1643 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1645 qhat -= 1;
1646 rhat += b_divisor[n-1];
1647 if (rhat < b)
1648 goto again;
1651 /* Multiply and subtract. */
1652 k = 0;
1653 for (i = 0; i < n; i++)
1655 p = qhat * b_divisor[i];
1656 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1657 b_dividend[i + j] = t;
1658 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1659 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1661 t = b_dividend[j+n] - k;
1662 b_dividend[j+n] = t;
1664 b_quotient[j] = qhat;
1665 if (t < 0)
1667 b_quotient[j] -= 1;
1668 k = 0;
1669 for (i = 0; i < n; i++)
1671 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1672 b_dividend[i+j] = t;
1673 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1675 b_dividend[j+n] += k;
1678 if (s)
1679 for (i = 0; i < n; i++)
1680 b_remainder[i] = (b_dividend[i] >> s)
1681 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1682 else
1683 for (i = 0; i < n; i++)
1684 b_remainder[i] = b_dividend[i];
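/* A note on the normalization step above (a summary of Knuth's
   Algorithm D, not an extra requirement of this code): shifting both
   operands left by S until the leading half block of the divisor has
   its high bit set guarantees that the estimate QHAT is at most 2 too
   large, so the "again" correction loop executes at most twice per
   quotient digit.  */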
1688 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1689 the result. If QUOTIENT is nonnull, store the value of the quotient
1690 there and return the number of blocks in it. The return value is
1691 not defined otherwise. If REMAINDER is nonnull, store the value
1692 of the remainder there and store the number of blocks in
1693 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1694 the division overflowed. */
1695 unsigned int
1696 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1697 HOST_WIDE_INT *remainder,
1698 const HOST_WIDE_INT *dividend_val,
1699 unsigned int dividend_len, unsigned int dividend_prec,
1700 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1701 unsigned int divisor_prec, signop sgn,
1702 bool *oflow)
1704 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1705 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1706 unsigned HOST_HALF_WIDE_INT
1707 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1708 unsigned HOST_HALF_WIDE_INT
1709 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1710 unsigned HOST_HALF_WIDE_INT
1711 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1712 unsigned HOST_HALF_WIDE_INT
1713 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714 unsigned int m, n;
1715 bool dividend_neg = false;
1716 bool divisor_neg = false;
1717 bool overflow = false;
1718 wide_int neg_dividend, neg_divisor;
1720 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1721 dividend_prec);
1722 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1723 divisor_prec);
1724 if (divisor == 0)
1725 overflow = true;
1727 /* The smallest signed number / -1 causes overflow. The dividend_len
1728 check is for speed rather than correctness. */
1729 if (sgn == SIGNED
1730 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1731 && divisor == -1
1732 && wi::only_sign_bit_p (dividend))
1733 overflow = true;
1735 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1736 (signed min / -1) has the same representation as the original dividend.
1737 We have traditionally made division by zero act as division by one,
1738 so there too we use the original dividend. */
1739 if (overflow)
1741 if (remainder)
1743 *remainder_len = 1;
1744 remainder[0] = 0;
1746 if (oflow != 0)
1747 *oflow = true;
1748 if (quotient)
1749 for (unsigned int i = 0; i < dividend_len; ++i)
1750 quotient[i] = dividend_val[i];
1751 return dividend_len;
1754 if (oflow)
1755 *oflow = false;
1757 /* Do it on the host if you can. */
1758 if (sgn == SIGNED
1759 && wi::fits_shwi_p (dividend)
1760 && wi::fits_shwi_p (divisor))
1762 HOST_WIDE_INT o0 = dividend.to_shwi ();
1763 HOST_WIDE_INT o1 = divisor.to_shwi ();
1765 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1767 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1768 if (quotient)
1770 quotient[0] = HOST_WIDE_INT_MIN;
1771 quotient[1] = 0;
1773 if (remainder)
1775 remainder[0] = 0;
1776 *remainder_len = 1;
1778 return 2;
1780 else
1782 if (quotient)
1783 quotient[0] = o0 / o1;
1784 if (remainder)
1786 remainder[0] = o0 % o1;
1787 *remainder_len = 1;
1789 return 1;
1793 if (sgn == UNSIGNED
1794 && wi::fits_uhwi_p (dividend)
1795 && wi::fits_uhwi_p (divisor))
1797 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1798 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1800 if (quotient)
1801 quotient[0] = o0 / o1;
1802 if (remainder)
1804 remainder[0] = o0 % o1;
1805 *remainder_len = 1;
1807 return 1;
1810 /* Make the divisor and dividend positive and remember what we
1811 did. */
1812 if (sgn == SIGNED)
1814 if (wi::neg_p (dividend))
1816 neg_dividend = -dividend;
1817 dividend = neg_dividend;
1818 dividend_neg = true;
1820 if (wi::neg_p (divisor))
1822 neg_divisor = -divisor;
1823 divisor = neg_divisor;
1824 divisor_neg = true;
1828 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1829 dividend_blocks_needed, dividend_prec, sgn);
1830 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1831 divisor_blocks_needed, divisor_prec, sgn);
1833 m = dividend_blocks_needed;
1834 b_dividend[m] = 0;
1835 while (m > 1 && b_dividend[m - 1] == 0)
1836 m--;
1838 n = divisor_blocks_needed;
1839 while (n > 1 && b_divisor[n - 1] == 0)
1840 n--;
1842 memset (b_quotient, 0, sizeof (b_quotient));
1844 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1846 unsigned int quotient_len = 0;
1847 if (quotient)
1849 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1850 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1851 /* The quotient is negative if exactly one of the divisor or dividend
1852 is negative. */
1853 if (dividend_neg != divisor_neg)
1854 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1855 quotient_len, dividend_prec,
1856 UNSIGNED, 0);
1859 if (remainder)
1861 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1862 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1863 /* The remainder always has the same sign as the dividend. */
1864 if (dividend_neg)
1865 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1866 *remainder_len, dividend_prec,
1867 UNSIGNED, 0);
1870 return quotient_len;
1874 * Shifting, rotating and extraction.
1877 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1878 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1879 unsigned int
1880 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1881 unsigned int xlen, unsigned int precision,
1882 unsigned int shift)
1884 /* Split the shift into a whole-block shift and a subblock shift. */
1885 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1886 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1888 /* The whole-block shift fills with zeros. */
1889 unsigned int len = BLOCKS_NEEDED (precision);
1890 for (unsigned int i = 0; i < skip; ++i)
1891 val[i] = 0;
1893 /* It's easier to handle the simple block case specially. */
1894 if (small_shift == 0)
1895 for (unsigned int i = skip; i < len; ++i)
1896 val[i] = safe_uhwi (xval, xlen, i - skip);
1897 else
1899 /* The first unfilled output block is a left shift of the first
1900 block in XVAL. The other output blocks contain bits from two
1901 consecutive input blocks. */
1902 unsigned HOST_WIDE_INT carry = 0;
1903 for (unsigned int i = skip; i < len; ++i)
1905 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1906 val[i] = (x << small_shift) | carry;
1907 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1910 return canonize (val, len, precision);
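/* Sketch (illustrative only, 64-bit HOST_WIDE_INT): shifting a 128-bit
   value left by 72 gives SKIP == 1 and SMALL_SHIFT == 8, so val[0]
   becomes zero and val[1] is the low input block shifted left by 8; in
   general the bits shifted out of one input block are carried into the
   next output block.  */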
1913 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1914 number of blocks in VAL. The input has XPRECISION bits and the
1915 output has XPRECISION - SHIFT bits. */
1916 static unsigned int
1917 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1918 unsigned int xlen, unsigned int xprecision,
1919 unsigned int shift)
1921 /* Split the shift into a whole-block shift and a subblock shift. */
1922 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1923 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1925 /* Work out how many blocks are needed to store the significant bits
1926 (excluding the upper zeros or signs). */
1927 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1929 /* It's easier to handle the simple block case specially. */
1930 if (small_shift == 0)
1931 for (unsigned int i = 0; i < len; ++i)
1932 val[i] = safe_uhwi (xval, xlen, i + skip);
1933 else
1935 /* Each output block but the last is a combination of two input blocks.
1936 The last block is a right shift of the last block in XVAL. */
1937 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1938 for (unsigned int i = 0; i < len; ++i)
1940 val[i] = curr >> small_shift;
1941 curr = safe_uhwi (xval, xlen, i + skip + 1);
1942 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1945 return len;
1948 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1949 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1950 VAL has PRECISION bits. */
1951 unsigned int
1952 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1953 unsigned int xlen, unsigned int xprecision,
1954 unsigned int precision, unsigned int shift)
1956 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1958 /* The value we just created has precision XPRECISION - SHIFT.
1959 Zero-extend it to wider precisions. */
1960 if (precision > xprecision - shift)
1962 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1963 if (small_prec)
1964 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1965 else if (val[len - 1] < 0)
1967 /* Add a new block with a zero. */
1968 val[len++] = 0;
1969 return len;
1972 return canonize (val, len, precision);
1975 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1976 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1977 VAL has PRECISION bits. */
1978 unsigned int
1979 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1980 unsigned int xlen, unsigned int xprecision,
1981 unsigned int precision, unsigned int shift)
1983 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1985 /* The value we just created has precision XPRECISION - SHIFT.
1986 Sign-extend it to wider types. */
1987 if (precision > xprecision - shift)
1989 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1990 if (small_prec)
1991 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1993 return canonize (val, len, precision);
1996 /* Return the number of leading (upper) zeros in X. */
1997 int
1998 wi::clz (const wide_int_ref &x)
2000 /* Calculate how many bits there are above the highest represented block. */
2001 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2003 unsigned HOST_WIDE_INT high = x.uhigh ();
2004 if (count < 0)
2005 /* The upper -COUNT bits of HIGH are not part of the value.
2006 Clear them out. */
2007 high = (high << -count) >> -count;
2008 else if (x.sign_mask () < 0)
2009 /* The upper bit is set, so there are no leading zeros. */
2010 return 0;
2012 /* We don't need to look below HIGH. Either HIGH is nonzero,
2013 or the top bit of the block below is nonzero; clz_hwi is
2014 HOST_BITS_PER_WIDE_INT in the latter case. */
2015 return count + clz_hwi (high);
2018 /* Return the number of redundant sign bits in X. (That is, the number
2019 of bits immediately below the sign bit that have the same value as
2020 the sign bit.) */
2021 int
2022 wi::clrsb (const wide_int_ref &x)
2024 /* Calculate how many bits there are above the highest represented block. */
2025 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2027 unsigned HOST_WIDE_INT high = x.uhigh ();
2028 unsigned HOST_WIDE_INT mask = -1;
2029 if (count < 0)
2031 /* The upper -COUNT bits of HIGH are not part of the value.
2032 Clear them from both MASK and HIGH. */
2033 mask >>= -count;
2034 high &= mask;
2037 /* If the top bit is 1, count the number of leading 1s. If the top
2038 bit is zero, count the number of leading zeros. */
2039 if (high > mask / 2)
2040 high ^= mask;
2042 /* There are no sign bits below the top block, so we don't need to look
2043 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2044 HIGH is 0. */
2045 return count + clz_hwi (high) - 1;
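/* For example (illustrative only), at precision 16 both 0x0003 and
   0xfffc (-4) have thirteen redundant sign bits: bits 14 down to 2
   merely repeat the sign bit, so clrsb returns 13, while clz of 0x0003
   at the same precision is 14.  */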
2048 /* Return the number of trailing (lower) zeros in X. */
2049 int
2050 wi::ctz (const wide_int_ref &x)
2052 if (x.len == 1 && x.ulow () == 0)
2053 return x.precision;
2055 /* Having dealt with the zero case, there must be a block with a
2056 nonzero bit. We don't care about the bits above the first 1. */
2057 unsigned int i = 0;
2058 while (x.val[i] == 0)
2059 ++i;
2060 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2063 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2064 return -1. */
2065 int
2066 wi::exact_log2 (const wide_int_ref &x)
2068 /* Reject cases where there are implicit -1 blocks above HIGH. */
2069 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2070 return -1;
2072 /* Set CRUX to the index of the entry that should be nonzero.
2073 If the top block is zero then the next lowest block (if any)
2074 must have the high bit set. */
2075 unsigned int crux = x.len - 1;
2076 if (crux > 0 && x.val[crux] == 0)
2077 crux -= 1;
2079 /* Check that all lower blocks are zero. */
2080 for (unsigned int i = 0; i < crux; ++i)
2081 if (x.val[i] != 0)
2082 return -1;
2084 /* Get a zero-extended form of block CRUX. */
2085 unsigned HOST_WIDE_INT hwi = x.val[crux];
2086 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2087 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2089 /* Now it's down to whether HWI is a power of 2. */
2090 int res = ::exact_log2 (hwi);
2091 if (res >= 0)
2092 res += crux * HOST_BITS_PER_WIDE_INT;
2093 return res;
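/* For example (illustrative only): exact_log2 of 0x100 is 8; exact_log2
   of 2^64 at precision 128 is 64, found through the upper CRUX block;
   and exact_log2 of 0x300 is -1 because more than one bit is set.  */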
2096 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2097 int
2098 wi::floor_log2 (const wide_int_ref &x)
2100 return x.precision - 1 - clz (x);
2103 /* Return the index of the first (lowest) set bit in X, counting from 1.
2104 Return 0 if X is 0. */
2105 int
2106 wi::ffs (const wide_int_ref &x)
2108 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2111 /* Return true if sign-extending X to have precision PRECISION would give
2112 the minimum signed value at that precision. */
2113 bool
2114 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2116 return ctz (x) + 1 == int (precision);
2119 /* Return true if X represents the minimum signed value. */
2120 bool
2121 wi::only_sign_bit_p (const wide_int_ref &x)
2123 return only_sign_bit_p (x, x.precision);
2127 * Private utilities.
2130 void gt_ggc_mx (widest_int *) { }
2131 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2132 void gt_pch_nx (widest_int *) { }
2134 template void wide_int::dump () const;
2135 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2136 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2137 template void offset_int::dump () const;
2138 template void widest_int::dump () const;