1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "vec.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "inchash.h"
30 #include "dumpfile.h"
33 #define HOST_BITS_PER_HALF_WIDE_INT 32
34 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
35 # define HOST_HALF_WIDE_INT long
36 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
37 # define HOST_HALF_WIDE_INT int
38 #else
39 #error Please add support for HOST_HALF_WIDE_INT
40 #endif
42 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
 43 /* Do not include longlong.h when the compiler is clang-based.  See PR61146.  */
44 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
45 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
46 typedef unsigned HOST_WIDE_INT UWtype;
47 typedef unsigned int UQItype __attribute__ ((mode (QI)));
48 typedef unsigned int USItype __attribute__ ((mode (SI)));
49 typedef unsigned int UDItype __attribute__ ((mode (DI)));
50 #if W_TYPE_SIZE == 32
51 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
52 #else
53 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
54 #endif
55 #include "longlong.h"
56 #endif
58 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
61 * Internal utilities.
64 /* Quantities to deal with values that hold half of a wide int. Used
65 in multiply and divide. */
66 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
68 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
69 #define BLOCKS_NEEDED(PREC) \
70 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
71 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
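/* Illustrative sketch, not part of the original source: how the block
   macros behave for a few inputs.  The helper name is invented and the
   whole block is compiled out.  */
#if 0
static void
example_block_macros (void)
{
  gcc_assert (BLOCKS_NEEDED (0) == 1);
  gcc_assert (BLOCKS_NEEDED (1) == 1);
  gcc_assert (BLOCKS_NEEDED (HOST_BITS_PER_WIDE_INT) == 1);
  gcc_assert (BLOCKS_NEEDED (HOST_BITS_PER_WIDE_INT + 1) == 2);
  gcc_assert (SIGN_MASK (-5) == -1 && SIGN_MASK (5) == 0);
}
#endif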
 73 /* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
74 based on the top existing bit of VAL. */
76 static unsigned HOST_WIDE_INT
77 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
79 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
82 /* Convert the integer in VAL to canonical form, returning its new length.
83 LEN is the number of blocks currently in VAL and PRECISION is the number
84 of bits in the integer it represents.
86 This function only changes the representation, not the value. */
87 static unsigned int
88 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
90 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
91 HOST_WIDE_INT top;
92 int i;
94 if (len > blocks_needed)
95 len = blocks_needed;
97 if (len == 1)
98 return len;
100 top = val[len - 1];
101 if (len * HOST_BITS_PER_WIDE_INT > precision)
102 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
103 if (top != 0 && top != (HOST_WIDE_INT)-1)
104 return len;
106 /* At this point we know that the top is either 0 or -1. Find the
107 first block that is not a copy of this. */
108 for (i = len - 2; i >= 0; i--)
110 HOST_WIDE_INT x = val[i];
111 if (x != top)
113 if (SIGN_MASK (x) == top)
114 return i + 1;
 116 /* We need an extra block because the top bit of block I does
 117 not match the extension. */
118 return i + 2;
122 /* The number is 0 or -1. */
123 return 1;
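/* Illustrative sketch, not part of the original source: what canonize
   does to two common inputs.  The helper name is invented and the block
   is compiled out.  */
#if 0
static void
example_canonize (void)
{
  /* A 128-bit -1 stored redundantly in two blocks collapses to one,
     because the upper block is just a sign copy of the lower one.  */
  HOST_WIDE_INT v[2] = { -1, -1 };
  gcc_assert (canonize (v, 2, 128) == 1);

  /* HOST_WIDE_INT_MIN viewed as a positive 128-bit value needs an
     explicit zero block above it, so both blocks stay significant.  */
  HOST_WIDE_INT w[2] = { HOST_WIDE_INT_MIN, 0 };
  gcc_assert (canonize (w, 2, 128) == 2);
}
#endif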
127 * Conversion routines in and out of wide_int.
130 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
131 result for an integer with precision PRECISION. Return the length
 132 of VAL (after any canonization). */
133 unsigned int
134 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
135 unsigned int xlen, unsigned int precision, bool need_canon)
137 for (unsigned i = 0; i < xlen; i++)
138 val[i] = xval[i];
139 return need_canon ? canonize (val, xlen, precision) : xlen;
142 /* Construct a wide int from a buffer of length LEN. BUFFER will be
 143 read according to the byte endianness and word endianness of the target.
144 Only the lower BUFFER_LEN bytes of the result are set; the remaining
145 high bytes are cleared. */
146 wide_int
147 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
149 unsigned int precision = buffer_len * BITS_PER_UNIT;
150 wide_int result = wide_int::create (precision);
151 unsigned int words = buffer_len / UNITS_PER_WORD;
 153 /* We have to clear all the bits ourselves, as we merely OR in values
154 below. */
155 unsigned int len = BLOCKS_NEEDED (precision);
156 HOST_WIDE_INT *val = result.write_val ();
157 for (unsigned int i = 0; i < len; ++i)
158 val[i] = 0;
160 for (unsigned int byte = 0; byte < buffer_len; byte++)
162 unsigned int offset;
163 unsigned int index;
164 unsigned int bitpos = byte * BITS_PER_UNIT;
165 unsigned HOST_WIDE_INT value;
167 if (buffer_len > UNITS_PER_WORD)
169 unsigned int word = byte / UNITS_PER_WORD;
171 if (WORDS_BIG_ENDIAN)
172 word = (words - 1) - word;
174 offset = word * UNITS_PER_WORD;
176 if (BYTES_BIG_ENDIAN)
177 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
178 else
179 offset += byte % UNITS_PER_WORD;
181 else
182 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
184 value = (unsigned HOST_WIDE_INT) buffer[offset];
186 index = bitpos / HOST_BITS_PER_WIDE_INT;
187 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
190 result.set_len (canonize (val, len, precision));
192 return result;
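/* Illustrative sketch, not part of the original source: a single-byte
   buffer is endian-independent, so the result is easy to predict.  The
   helper name is invented and the block is compiled out.  */
#if 0
static void
example_from_buffer (void)
{
  unsigned char buf[1] = { 0x5a };
  wide_int x = wi::from_buffer (buf, 1);
  gcc_assert (x.get_precision () == 8 && x.to_uhwi () == 0x5a);
}
#endif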
 195 /* Set RESULT from X; the sign is taken according to SGN. */
196 void
197 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
199 int len = x.get_len ();
200 const HOST_WIDE_INT *v = x.get_val ();
201 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
203 if (wi::neg_p (x, sgn))
 205 /* We use ones' complement to avoid the 0x80...0 edge case, on which
 206 unary negation does not work. */
207 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
208 for (int i = 0; i < len; i++)
209 t[i] = ~v[i];
210 if (excess > 0)
211 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
212 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
213 mpz_com (result, result);
215 else if (excess > 0)
217 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218 for (int i = 0; i < len - 1; i++)
219 t[i] = v[i];
220 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223 else
224 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
 227 /* Return X converted to TYPE. If WRAP is true, then out-of-range
 228 values of X will be wrapped; otherwise, they will be set to the
229 appropriate minimum or maximum TYPE bound. */
230 wide_int
231 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
233 size_t count, numb;
234 unsigned int prec = TYPE_PRECISION (type);
235 wide_int res = wide_int::create (prec);
237 if (!wrap)
239 mpz_t min, max;
241 mpz_init (min);
242 mpz_init (max);
243 get_type_static_bounds (type, min, max);
245 if (mpz_cmp (x, min) < 0)
246 mpz_set (x, min);
247 else if (mpz_cmp (x, max) > 0)
248 mpz_set (x, max);
250 mpz_clear (min);
251 mpz_clear (max);
254 /* Determine the number of unsigned HOST_WIDE_INTs that are required
255 for representing the value. The code to calculate count is
256 extracted from the GMP manual, section "Integer Import and Export":
257 http://gmplib.org/manual/Integer-Import-and-Export.html */
258 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
259 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
260 HOST_WIDE_INT *val = res.write_val ();
261 /* Write directly to the wide_int storage if possible, otherwise leave
262 GMP to allocate the memory for us. It might be slightly more efficient
263 to use mpz_tdiv_r_2exp for the latter case, but the situation is
264 pathological and it seems safer to operate on the original mpz value
265 in all cases. */
266 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
267 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
268 if (count < 1)
270 val[0] = 0;
271 count = 1;
273 count = MIN (count, BLOCKS_NEEDED (prec));
274 if (valres != val)
276 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
277 free (valres);
279 res.set_len (canonize (val, count, prec));
281 if (mpz_sgn (x) < 0)
282 res = -res;
284 return res;
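/* Illustrative sketch, not part of the original source: a to_mpz /
   from_mpz round trip through integer_type_node.  The helper name is
   invented and the block is compiled out.  */
#if 0
static void
example_mpz_round_trip (void)
{
  wide_int x = wi::shwi (-42, TYPE_PRECISION (integer_type_node));
  mpz_t m;
  mpz_init (m);
  wi::to_mpz (x, m, SIGNED);
  wide_int y = wi::from_mpz (integer_type_node, m, true);
  gcc_assert (wi::eq_p (x, y));
  mpz_clear (m);
}
#endif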
288 * Largest and smallest values in a mode.
291 /* Return the largest SGNed number that is representable in PRECISION bits.
 293 TODO: There is still code from the double_int era that tries to
 294 make up for the fact that double_ints could not represent the
295 min and max values of all types. This code should be removed
296 because the min and max values can always be represented in
297 wide_ints and int-csts. */
298 wide_int
299 wi::max_value (unsigned int precision, signop sgn)
301 gcc_checking_assert (precision != 0);
302 if (sgn == UNSIGNED)
303 /* The unsigned max is just all ones. */
304 return shwi (-1, precision);
305 else
306 /* The signed max is all ones except the top bit. This must be
307 explicitly represented. */
308 return mask (precision - 1, false, precision);
 311 /* Return the smallest SGNed number that is representable in PRECISION bits. */
312 wide_int
313 wi::min_value (unsigned int precision, signop sgn)
315 gcc_checking_assert (precision != 0);
316 if (sgn == UNSIGNED)
317 return uhwi (0, precision);
318 else
319 /* The signed min is all zeros except the top bit. This must be
320 explicitly represented. */
321 return wi::set_bit_in_zero (precision - 1, precision);
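/* Illustrative sketch, not part of the original source: the extreme
   values of an 8-bit type.  The helper name is invented and the block is
   compiled out.  */
#if 0
static void
example_min_max (void)
{
  gcc_assert (wi::max_value (8, UNSIGNED).to_uhwi () == 255);
  gcc_assert (wi::max_value (8, SIGNED).to_shwi () == 127);
  gcc_assert (wi::min_value (8, UNSIGNED).to_uhwi () == 0);
  gcc_assert (wi::min_value (8, SIGNED).to_shwi () == -128);
}
#endif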
325 * Public utilities.
328 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
329 signedness SGN, to an integer that has PRECISION bits. Store the blocks
330 in VAL and return the number of blocks used.
332 This function can handle both extension (PRECISION > XPRECISION)
333 and truncation (PRECISION < XPRECISION). */
334 unsigned int
335 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
336 unsigned int xlen, unsigned int xprecision,
337 unsigned int precision, signop sgn)
339 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
340 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
341 for (unsigned i = 0; i < len; i++)
342 val[i] = xval[i];
344 if (precision > xprecision)
346 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
348 /* Expanding. */
349 if (sgn == UNSIGNED)
351 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
352 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
353 else if (val[len - 1] < 0)
355 while (len < BLOCKS_NEEDED (xprecision))
356 val[len++] = -1;
357 if (small_xprecision)
358 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
359 else
360 val[len++] = 0;
363 else
365 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
366 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
369 len = canonize (val, len, precision);
371 return len;
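/* Illustrative sketch, not part of the original source: one public route
   to this conversion is wide_int::from, where SGN describes how the
   input should be extended.  The helper name is invented and the block
   is compiled out.  */
#if 0
static void
example_force_to_size (void)
{
  wide_int x = wi::shwi (-1, 8);	/* 0xff in 8 bits.  */
  gcc_assert (wide_int::from (x, 32, SIGNED).to_shwi () == -1);
  gcc_assert (wide_int::from (x, 32, UNSIGNED).to_uhwi () == 255);
}
#endif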
374 /* This function hides the fact that we cannot rely on the bits beyond
 375 the precision. This issue comes up in the relational comparisons
376 where we do allow comparisons of values of different precisions. */
377 static inline HOST_WIDE_INT
378 selt (const HOST_WIDE_INT *a, unsigned int len,
379 unsigned int blocks_needed, unsigned int small_prec,
380 unsigned int index, signop sgn)
382 HOST_WIDE_INT val;
383 if (index < len)
384 val = a[index];
385 else if (index < blocks_needed || sgn == SIGNED)
386 /* Signed or within the precision. */
387 val = SIGN_MASK (a[len - 1]);
388 else
389 /* Unsigned extension beyond the precision. */
390 val = 0;
392 if (small_prec && index == blocks_needed - 1)
393 return (sgn == SIGNED
394 ? sext_hwi (val, small_prec)
395 : zext_hwi (val, small_prec));
396 else
397 return val;
400 /* Find the highest bit represented in a wide int. This will in
401 general have the same value as the sign bit. */
402 static inline HOST_WIDE_INT
403 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
405 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
406 unsigned HOST_WIDE_INT val = a[len - 1];
407 if (excess > 0)
408 val <<= excess;
409 return val >> (HOST_BITS_PER_WIDE_INT - 1);
 413 * Comparisons. Note that only equality is an operator. The other
414 * comparisons cannot be operators since they are inherently signed or
415 * unsigned and C++ has no such operators.
418 /* Return true if OP0 == OP1. */
419 bool
420 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
421 const HOST_WIDE_INT *op1, unsigned int op1len,
422 unsigned int prec)
424 int l0 = op0len - 1;
425 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
427 if (op0len != op1len)
428 return false;
430 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
 432 /* It does not matter whether we zext or sext here; we just have to
 433 do both the same way. */
434 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
435 return false;
436 l0--;
439 while (l0 >= 0)
440 if (op0[l0] != op1[l0])
441 return false;
442 else
443 l0--;
445 return true;
448 /* Return true if OP0 < OP1 using signed comparisons. */
449 bool
450 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
451 unsigned int precision,
452 const HOST_WIDE_INT *op1, unsigned int op1len)
454 HOST_WIDE_INT s0, s1;
455 unsigned HOST_WIDE_INT u0, u1;
456 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
457 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
458 int l = MAX (op0len - 1, op1len - 1);
460 /* Only the top block is compared as signed. The rest are unsigned
461 comparisons. */
462 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
463 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
464 if (s0 < s1)
465 return true;
466 if (s0 > s1)
467 return false;
469 l--;
470 while (l >= 0)
472 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
473 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
475 if (u0 < u1)
476 return true;
477 if (u0 > u1)
478 return false;
479 l--;
482 return false;
485 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
486 signed compares. */
 487 int
 488 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
489 unsigned int precision,
490 const HOST_WIDE_INT *op1, unsigned int op1len)
492 HOST_WIDE_INT s0, s1;
493 unsigned HOST_WIDE_INT u0, u1;
494 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
495 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
496 int l = MAX (op0len - 1, op1len - 1);
498 /* Only the top block is compared as signed. The rest are unsigned
499 comparisons. */
500 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
501 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
502 if (s0 < s1)
503 return -1;
504 if (s0 > s1)
505 return 1;
507 l--;
508 while (l >= 0)
510 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
511 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
513 if (u0 < u1)
514 return -1;
515 if (u0 > u1)
516 return 1;
517 l--;
520 return 0;
523 /* Return true if OP0 < OP1 using unsigned comparisons. */
524 bool
525 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
526 unsigned int precision,
527 const HOST_WIDE_INT *op1, unsigned int op1len)
529 unsigned HOST_WIDE_INT x0;
530 unsigned HOST_WIDE_INT x1;
531 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
532 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
533 int l = MAX (op0len - 1, op1len - 1);
535 while (l >= 0)
537 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
538 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
539 if (x0 < x1)
540 return true;
541 if (x0 > x1)
542 return false;
543 l--;
546 return false;
549 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
550 unsigned compares. */
 551 int
 552 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
553 unsigned int precision,
554 const HOST_WIDE_INT *op1, unsigned int op1len)
556 unsigned HOST_WIDE_INT x0;
557 unsigned HOST_WIDE_INT x1;
558 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
559 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
560 int l = MAX (op0len - 1, op1len - 1);
562 while (l >= 0)
564 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
565 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
566 if (x0 < x1)
567 return -1;
568 if (x0 > x1)
569 return 1;
570 l--;
573 return 0;
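/* Illustrative sketch, not part of the original source: the same bit
   pattern compares differently under signed and unsigned predicates.
   The helper name is invented and the block is compiled out.  */
#if 0
static void
example_compare (void)
{
  wide_int a = wi::shwi (-1, 16);	/* 0xffff */
  wide_int b = wi::shwi (1, 16);
  gcc_assert (wi::lts_p (a, b));	/* -1 < 1 as signed values.  */
  gcc_assert (!wi::ltu_p (a, b));	/* 0xffff > 1 as unsigned values.  */
  gcc_assert (wi::cmps (a, b) == -1 && wi::cmpu (a, b) == 1);
}
#endif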
577 * Extension.
580 /* Sign-extend the number represented by XVAL and XLEN into VAL,
581 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
582 and VAL have PRECISION bits. */
583 unsigned int
584 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
585 unsigned int xlen, unsigned int precision, unsigned int offset)
587 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
588 /* Extending beyond the precision is a no-op. If we have only stored
589 OFFSET bits or fewer, the rest are already signs. */
590 if (offset >= precision || len >= xlen)
592 for (unsigned i = 0; i < xlen; ++i)
593 val[i] = xval[i];
594 return xlen;
596 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
597 for (unsigned int i = 0; i < len; i++)
598 val[i] = xval[i];
599 if (suboffset > 0)
601 val[len] = sext_hwi (xval[len], suboffset);
602 len += 1;
604 return canonize (val, len, precision);
607 /* Zero-extend the number represented by XVAL and XLEN into VAL,
608 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
609 and VAL have PRECISION bits. */
610 unsigned int
611 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
612 unsigned int xlen, unsigned int precision, unsigned int offset)
614 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
615 /* Extending beyond the precision is a no-op. If we have only stored
616 OFFSET bits or fewer, and the upper stored bit is zero, then there
617 is nothing to do. */
618 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
620 for (unsigned i = 0; i < xlen; ++i)
621 val[i] = xval[i];
622 return xlen;
624 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
625 for (unsigned int i = 0; i < len; i++)
626 val[i] = i < xlen ? xval[i] : -1;
627 if (suboffset > 0)
628 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
629 else
630 val[len] = 0;
631 return canonize (val, len + 1, precision);
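/* Illustrative sketch, not part of the original source: extending a
   value from bit 8 within a 32-bit wide_int via wi::sext and wi::zext.
   The helper name is invented and the block is compiled out.  */
#if 0
static void
example_extension (void)
{
  wide_int x = wi::shwi (0xff, 32);
  gcc_assert (wi::sext (x, 8).to_shwi () == -1);
  gcc_assert (wi::zext (x, 8).to_uhwi () == 0xff);
}
#endif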
635 * Masking, inserting, shifting, rotating.
638 /* Insert WIDTH bits from Y into X starting at START. */
639 wide_int
640 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
641 unsigned int width)
643 wide_int result;
644 wide_int mask;
645 wide_int tmp;
647 unsigned int precision = x.get_precision ();
648 if (start >= precision)
649 return x;
651 gcc_checking_assert (precision >= width);
653 if (start + width >= precision)
654 width = precision - start;
656 mask = wi::shifted_mask (start, width, false, precision);
657 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
658 result = tmp & mask;
660 tmp = wi::bit_and_not (x, mask);
661 result = result | tmp;
663 return result;
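/* Illustrative sketch, not part of the original source: inserting a
   4-bit field at bit 8 of a 32-bit zero.  The helper name is invented
   and the block is compiled out.  */
#if 0
static void
example_insert (void)
{
  wide_int x = wi::shwi (0, 32);
  wide_int y = wi::shwi (0xa, 32);
  gcc_assert (wi::insert (x, y, 8, 4).to_uhwi () == 0xa00);
}
#endif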
666 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
667 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
668 bits. */
669 unsigned int
670 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
671 unsigned int xlen, unsigned int precision, unsigned int bit)
673 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
674 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
676 if (block + 1 >= xlen)
678 /* The operation either affects the last current block or needs
679 a new block. */
680 unsigned int len = block + 1;
681 for (unsigned int i = 0; i < len; i++)
682 val[i] = safe_uhwi (xval, xlen, i);
683 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
685 /* If the bit we just set is at the msb of the block, make sure
686 that any higher bits are zeros. */
687 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
688 val[len++] = 0;
689 return len;
691 else
693 for (unsigned int i = 0; i < xlen; i++)
694 val[i] = xval[i];
695 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
696 return canonize (val, xlen, precision);
700 /* bswap THIS. */
701 wide_int
702 wide_int_storage::bswap () const
704 wide_int result = wide_int::create (precision);
705 unsigned int i, s;
706 unsigned int len = BLOCKS_NEEDED (precision);
707 unsigned int xlen = get_len ();
708 const HOST_WIDE_INT *xval = get_val ();
709 HOST_WIDE_INT *val = result.write_val ();
711 /* This is not a well defined operation if the precision is not a
712 multiple of 8. */
713 gcc_assert ((precision & 0x7) == 0);
715 for (i = 0; i < len; i++)
716 val[i] = 0;
718 /* Only swap the bytes that are not the padding. */
719 for (s = 0; s < precision; s += 8)
721 unsigned int d = precision - s - 8;
722 unsigned HOST_WIDE_INT byte;
724 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
725 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
727 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
729 block = d / HOST_BITS_PER_WIDE_INT;
730 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
732 val[block] |= byte << offset;
735 result.set_len (canonize (val, len, precision));
736 return result;
739 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
740 above that up to PREC are zeros. The result is inverted if NEGATE
741 is true. Return the number of blocks in VAL. */
742 unsigned int
743 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
744 unsigned int prec)
746 if (width >= prec)
748 val[0] = negate ? 0 : -1;
749 return 1;
751 else if (width == 0)
753 val[0] = negate ? -1 : 0;
754 return 1;
757 unsigned int i = 0;
758 while (i < width / HOST_BITS_PER_WIDE_INT)
759 val[i++] = negate ? 0 : -1;
761 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
762 if (shift != 0)
764 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
765 val[i++] = negate ? ~last : last;
767 else
768 val[i++] = negate ? -1 : 0;
770 return i;
773 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
774 bits are ones, and the bits above that up to PREC are zeros. The result
775 is inverted if NEGATE is true. Return the number of blocks in VAL. */
776 unsigned int
777 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
778 bool negate, unsigned int prec)
780 if (start >= prec || width == 0)
782 val[0] = negate ? -1 : 0;
783 return 1;
786 if (width > prec - start)
787 width = prec - start;
788 unsigned int end = start + width;
790 unsigned int i = 0;
791 while (i < start / HOST_BITS_PER_WIDE_INT)
792 val[i++] = negate ? -1 : 0;
794 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
795 if (shift)
797 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
798 shift += width;
799 if (shift < HOST_BITS_PER_WIDE_INT)
801 /* case 000111000 */
802 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
803 val[i++] = negate ? ~block : block;
804 return i;
806 else
807 /* ...111000 */
808 val[i++] = negate ? block : ~block;
811 while (i < end / HOST_BITS_PER_WIDE_INT)
812 /* 1111111 */
813 val[i++] = negate ? 0 : -1;
815 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
816 if (shift != 0)
818 /* 000011111 */
819 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
820 val[i++] = negate ? ~block : block;
822 else if (end < prec)
823 val[i++] = negate ? -1 : 0;
825 return i;
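/* Illustrative sketch, not part of the original source: the wide_int
   forms of these mask builders at 32-bit precision.  The helper name is
   invented and the block is compiled out.  */
#if 0
static void
example_masks (void)
{
  gcc_assert (wi::mask (8, false, 32).to_uhwi () == 0xff);
  gcc_assert (wi::mask (8, true, 32).to_uhwi () == 0xffffff00);
  gcc_assert (wi::shifted_mask (4, 8, false, 32).to_uhwi () == 0xff0);
}
#endif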
 829 * Logical operations.
832 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
833 unsigned int
834 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
835 unsigned int op0len, const HOST_WIDE_INT *op1,
836 unsigned int op1len, unsigned int prec)
838 int l0 = op0len - 1;
839 int l1 = op1len - 1;
840 bool need_canon = true;
842 unsigned int len = MAX (op0len, op1len);
843 if (l0 > l1)
845 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
846 if (op1mask == 0)
848 l0 = l1;
849 len = l1 + 1;
851 else
853 need_canon = false;
854 while (l0 > l1)
856 val[l0] = op0[l0];
857 l0--;
861 else if (l1 > l0)
863 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
864 if (op0mask == 0)
865 len = l0 + 1;
866 else
868 need_canon = false;
869 while (l1 > l0)
871 val[l1] = op1[l1];
872 l1--;
877 while (l0 >= 0)
879 val[l0] = op0[l0] & op1[l0];
880 l0--;
883 if (need_canon)
884 len = canonize (val, len, prec);
886 return len;
889 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
890 unsigned int
891 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
892 unsigned int op0len, const HOST_WIDE_INT *op1,
893 unsigned int op1len, unsigned int prec)
895 wide_int result;
896 int l0 = op0len - 1;
897 int l1 = op1len - 1;
898 bool need_canon = true;
900 unsigned int len = MAX (op0len, op1len);
901 if (l0 > l1)
903 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
904 if (op1mask != 0)
906 l0 = l1;
907 len = l1 + 1;
909 else
911 need_canon = false;
912 while (l0 > l1)
914 val[l0] = op0[l0];
915 l0--;
919 else if (l1 > l0)
921 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
922 if (op0mask == 0)
923 len = l0 + 1;
924 else
926 need_canon = false;
927 while (l1 > l0)
929 val[l1] = ~op1[l1];
930 l1--;
935 while (l0 >= 0)
937 val[l0] = op0[l0] & ~op1[l0];
938 l0--;
941 if (need_canon)
942 len = canonize (val, len, prec);
944 return len;
947 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
948 unsigned int
949 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
950 unsigned int op0len, const HOST_WIDE_INT *op1,
951 unsigned int op1len, unsigned int prec)
953 wide_int result;
954 int l0 = op0len - 1;
955 int l1 = op1len - 1;
956 bool need_canon = true;
958 unsigned int len = MAX (op0len, op1len);
959 if (l0 > l1)
961 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
962 if (op1mask != 0)
964 l0 = l1;
965 len = l1 + 1;
967 else
969 need_canon = false;
970 while (l0 > l1)
972 val[l0] = op0[l0];
973 l0--;
977 else if (l1 > l0)
979 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
980 if (op0mask != 0)
981 len = l0 + 1;
982 else
984 need_canon = false;
985 while (l1 > l0)
987 val[l1] = op1[l1];
988 l1--;
993 while (l0 >= 0)
995 val[l0] = op0[l0] | op1[l0];
996 l0--;
999 if (need_canon)
1000 len = canonize (val, len, prec);
1002 return len;
1005 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1006 unsigned int
1007 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1008 unsigned int op0len, const HOST_WIDE_INT *op1,
1009 unsigned int op1len, unsigned int prec)
1011 wide_int result;
1012 int l0 = op0len - 1;
1013 int l1 = op1len - 1;
1014 bool need_canon = true;
1016 unsigned int len = MAX (op0len, op1len);
1017 if (l0 > l1)
1019 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1020 if (op1mask == 0)
1022 l0 = l1;
1023 len = l1 + 1;
1025 else
1027 need_canon = false;
1028 while (l0 > l1)
1030 val[l0] = op0[l0];
1031 l0--;
1035 else if (l1 > l0)
1037 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1038 if (op0mask != 0)
1039 len = l0 + 1;
1040 else
1042 need_canon = false;
1043 while (l1 > l0)
1045 val[l1] = ~op1[l1];
1046 l1--;
1051 while (l0 >= 0)
1053 val[l0] = op0[l0] | ~op1[l0];
1054 l0--;
1057 if (need_canon)
1058 len = canonize (val, len, prec);
1060 return len;
1063 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1064 unsigned int
1065 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1066 unsigned int op0len, const HOST_WIDE_INT *op1,
1067 unsigned int op1len, unsigned int prec)
1069 wide_int result;
1070 int l0 = op0len - 1;
1071 int l1 = op1len - 1;
1073 unsigned int len = MAX (op0len, op1len);
1074 if (l0 > l1)
1076 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1077 while (l0 > l1)
1079 val[l0] = op0[l0] ^ op1mask;
1080 l0--;
1084 if (l1 > l0)
1086 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1087 while (l1 > l0)
1089 val[l1] = op0mask ^ op1[l1];
1090 l1--;
1094 while (l0 >= 0)
1096 val[l0] = op0[l0] ^ op1[l0];
1097 l0--;
1100 return canonize (val, len, prec);
 1104 * Math.
1107 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1108 whether the result overflows when OP0 and OP1 are treated as having
1109 signedness SGN. Return the number of blocks in VAL. */
1110 unsigned int
1111 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1112 unsigned int op0len, const HOST_WIDE_INT *op1,
1113 unsigned int op1len, unsigned int prec,
1114 signop sgn, bool *overflow)
1116 unsigned HOST_WIDE_INT o0 = 0;
1117 unsigned HOST_WIDE_INT o1 = 0;
1118 unsigned HOST_WIDE_INT x = 0;
1119 unsigned HOST_WIDE_INT carry = 0;
1120 unsigned HOST_WIDE_INT old_carry = 0;
1121 unsigned HOST_WIDE_INT mask0, mask1;
1122 unsigned int i;
1124 unsigned int len = MAX (op0len, op1len);
1125 mask0 = -top_bit_of (op0, op0len, prec);
1126 mask1 = -top_bit_of (op1, op1len, prec);
1127 /* Add all of the explicitly defined elements. */
1129 for (i = 0; i < len; i++)
1131 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1132 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1133 x = o0 + o1 + carry;
1134 val[i] = x;
1135 old_carry = carry;
1136 carry = carry == 0 ? x < o0 : x <= o0;
1139 if (len * HOST_BITS_PER_WIDE_INT < prec)
1141 val[len] = mask0 + mask1 + carry;
1142 len++;
1143 if (overflow)
1144 *overflow = false;
1146 else if (overflow)
1148 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1149 if (sgn == SIGNED)
1151 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1152 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1154 else
 1156 /* Put the MSBs of X and O0 in the top of the HWI. */
1157 x <<= shift;
1158 o0 <<= shift;
1159 if (old_carry)
1160 *overflow = (x <= o0);
1161 else
1162 *overflow = (x < o0);
1166 return canonize (val, len, prec);
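/* Illustrative sketch, not part of the original source: wi::add with the
   bool *overflow interface used by add_large above.  127 + 1 wraps to
   -128 in 8 bits, which overflows as a signed addition but not as an
   unsigned one.  The helper name is invented and the block is compiled
   out.  */
#if 0
static void
example_add_overflow (void)
{
  bool ovf;
  wide_int a = wi::shwi (127, 8);
  wide_int b = wi::shwi (1, 8);
  wide_int sum = wi::add (a, b, SIGNED, &ovf);
  gcc_assert (sum.to_shwi () == -128 && ovf);
  sum = wi::add (a, b, UNSIGNED, &ovf);
  gcc_assert (!ovf);
}
#endif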
1169 /* Subroutines of the multiplication and division operations. Unpack
1170 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1171 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1172 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1173 static void
1174 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1175 unsigned int in_len, unsigned int out_len,
1176 unsigned int prec, signop sgn)
1178 unsigned int i;
1179 unsigned int j = 0;
1180 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1181 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1182 HOST_WIDE_INT mask;
1184 if (sgn == SIGNED)
1186 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1187 mask &= HALF_INT_MASK;
1189 else
1190 mask = 0;
1192 for (i = 0; i < blocks_needed - 1; i++)
1194 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1195 result[j++] = x;
1196 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1199 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1200 if (small_prec)
1202 if (sgn == SIGNED)
1203 x = sext_hwi (x, small_prec);
1204 else
1205 x = zext_hwi (x, small_prec);
1207 result[j++] = x;
1208 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1210 /* Smear the sign bit. */
1211 while (j < out_len)
1212 result[j++] = mask;
 1215 /* The inverse of wi_unpack. IN_LEN is the number of input
1216 blocks. The number of output blocks will be half this amount. */
1217 static void
1218 wi_pack (unsigned HOST_WIDE_INT *result,
1219 const unsigned HOST_HALF_WIDE_INT *input,
1220 unsigned int in_len)
1222 unsigned int i = 0;
1223 unsigned int j = 0;
1225 while (i + 2 < in_len)
1227 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1228 | ((unsigned HOST_WIDE_INT)input[i + 1]
1229 << HOST_BITS_PER_HALF_WIDE_INT);
1230 i += 2;
1233 /* Handle the case where in_len is odd. For this we zero extend. */
1234 if (in_len & 1)
1235 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1236 else
1237 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1238 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
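/* Illustrative sketch, not part of the original source: wi_unpack and
   wi_pack are inverses on full blocks.  Assumes a 64-bit HOST_WIDE_INT;
   the helper name is invented and the block is compiled out.  */
#if 0
static void
example_unpack_pack (void)
{
  HOST_WIDE_INT in[1] = { 0x123456789abcdef0 };
  unsigned HOST_HALF_WIDE_INT halves[2];
  unsigned HOST_WIDE_INT out[1];
  wi_unpack (halves, in, 1, 2, 64, UNSIGNED);	/* two 32-bit halves */
  wi_pack (out, halves, 2);			/* back to one block */
  gcc_assert ((HOST_WIDE_INT) out[0] == in[0]);
}
#endif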
 1241 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1242 result is returned.
1244 If HIGH is not set, throw away the upper half after the check is
1245 made to see if it overflows. Unfortunately there is no better way
1246 to check for overflow than to do this. If OVERFLOW is nonnull,
1247 record in *OVERFLOW whether the result overflowed. SGN controls
1248 the signedness and is used to check overflow or if HIGH is set. */
1249 unsigned int
1250 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1251 unsigned int op1len, const HOST_WIDE_INT *op2val,
1252 unsigned int op2len, unsigned int prec, signop sgn,
1253 bool *overflow, bool high)
1255 unsigned HOST_WIDE_INT o0, o1, k, t;
1256 unsigned int i;
1257 unsigned int j;
1258 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1259 unsigned int half_blocks_needed = blocks_needed * 2;
1260 /* The sizes here are scaled to support a 2x largest mode by 2x
1261 largest mode yielding a 4x largest mode result. This is what is
1262 needed by vpn. */
1264 unsigned HOST_HALF_WIDE_INT
1265 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1266 unsigned HOST_HALF_WIDE_INT
1267 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1268 /* The '2' in 'R' is because we are internally doing a full
1269 multiply. */
1270 unsigned HOST_HALF_WIDE_INT
1271 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1272 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1274 /* If the top level routine did not really pass in an overflow, then
1275 just make sure that we never attempt to set it. */
1276 bool needs_overflow = (overflow != 0);
1277 if (needs_overflow)
1278 *overflow = false;
1280 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1281 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1283 /* This is a surprisingly common case, so do it first. */
1284 if (op1 == 0 || op2 == 0)
1286 val[0] = 0;
1287 return 1;
1290 #ifdef umul_ppmm
1291 if (sgn == UNSIGNED)
1293 /* If the inputs are single HWIs and the output has room for at
1294 least two HWIs, we can use umul_ppmm directly. */
1295 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1296 && wi::fits_uhwi_p (op1)
1297 && wi::fits_uhwi_p (op2))
1299 /* This case never overflows. */
1300 if (high)
1302 val[0] = 0;
1303 return 1;
1305 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1306 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1308 val[2] = 0;
1309 return 3;
1311 return 1 + (val[1] != 0 || val[0] < 0);
1313 /* Likewise if the output is a full single HWI, except that the
1314 upper HWI of the result is only used for determining overflow.
1315 (We handle this case inline when overflow isn't needed.) */
1316 else if (prec == HOST_BITS_PER_WIDE_INT)
1318 unsigned HOST_WIDE_INT upper;
1319 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1320 if (needs_overflow)
1321 *overflow = (upper != 0);
1322 if (high)
1323 val[0] = upper;
1324 return 1;
1327 #endif
1329 /* Handle multiplications by 1. */
1330 if (op1 == 1)
1332 if (high)
1334 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1335 return 1;
1337 for (i = 0; i < op2len; i++)
1338 val[i] = op2val[i];
1339 return op2len;
1341 if (op2 == 1)
1343 if (high)
1345 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1346 return 1;
1348 for (i = 0; i < op1len; i++)
1349 val[i] = op1val[i];
1350 return op1len;
1353 /* If we need to check for overflow, we can only do half wide
1354 multiplies quickly because we need to look at the top bits to
1355 check for the overflow. */
1356 if ((high || needs_overflow)
1357 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1359 unsigned HOST_WIDE_INT r;
1361 if (sgn == SIGNED)
1363 o0 = op1.to_shwi ();
1364 o1 = op2.to_shwi ();
1366 else
1368 o0 = op1.to_uhwi ();
1369 o1 = op2.to_uhwi ();
1372 r = o0 * o1;
1373 if (needs_overflow)
1375 if (sgn == SIGNED)
1377 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1378 *overflow = true;
1380 else
1382 if ((r >> prec) != 0)
1383 *overflow = true;
1386 val[0] = high ? r >> prec : r;
1387 return 1;
1390 /* We do unsigned mul and then correct it. */
1391 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1392 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1394 /* The 2 is for a full mult. */
1395 memset (r, 0, half_blocks_needed * 2
1396 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1398 for (j = 0; j < half_blocks_needed; j++)
1400 k = 0;
1401 for (i = 0; i < half_blocks_needed; i++)
1403 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1404 + r[i + j] + k);
1405 r[i + j] = t & HALF_INT_MASK;
1406 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1408 r[j + half_blocks_needed] = k;
1411 /* We did unsigned math above. For signed we must adjust the
1412 product (assuming we need to see that). */
1413 if (sgn == SIGNED && (high || needs_overflow))
1415 unsigned HOST_WIDE_INT b;
1416 if (wi::neg_p (op1))
1418 b = 0;
1419 for (i = 0; i < half_blocks_needed; i++)
1421 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1422 - (unsigned HOST_WIDE_INT)v[i] - b;
1423 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1424 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1427 if (wi::neg_p (op2))
1429 b = 0;
1430 for (i = 0; i < half_blocks_needed; i++)
1432 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1433 - (unsigned HOST_WIDE_INT)u[i] - b;
1434 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1435 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1440 if (needs_overflow)
1442 HOST_WIDE_INT top;
1444 /* For unsigned, overflow is true if any of the top bits are set.
1445 For signed, overflow is true if any of the top bits are not equal
1446 to the sign bit. */
1447 if (sgn == UNSIGNED)
1448 top = 0;
1449 else
1451 top = r[(half_blocks_needed) - 1];
1452 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1453 top &= mask;
1456 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1457 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1458 *overflow = true;
1461 if (high)
1463 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1464 wi_pack ((unsigned HOST_WIDE_INT *) val,
1465 &r[half_blocks_needed], half_blocks_needed);
1466 return canonize (val, blocks_needed, prec);
1468 else
 1470 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1471 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1472 return canonize (val, blocks_needed, prec);
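/* Illustrative sketch, not part of the original source: wi::mul with the
   bool *overflow interface used by mul_internal above.  100 * 3 = 300
   does not fit in 8 signed bits; the truncated product is 300 - 256 = 44.
   The helper name is invented and the block is compiled out.  */
#if 0
static void
example_mul_overflow (void)
{
  bool ovf;
  wide_int a = wi::shwi (100, 8);
  wide_int b = wi::shwi (3, 8);
  wide_int prod = wi::mul (a, b, SIGNED, &ovf);
  gcc_assert (prod.to_shwi () == 44 && ovf);
}
#endif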
1476 /* Compute the population count of X. */
 1477 int
 1478 wi::popcount (const wide_int_ref &x)
1480 unsigned int i;
1481 int count;
1483 /* The high order block is special if it is the last block and the
1484 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1485 have to clear out any ones above the precision before doing
1486 popcount on this block. */
1487 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1488 unsigned int stop = x.len;
1489 if (count < 0)
1491 count = popcount_hwi (x.uhigh () << -count);
1492 stop -= 1;
1494 else
1496 if (x.sign_mask () >= 0)
1497 count = 0;
1500 for (i = 0; i < stop; ++i)
1501 count += popcount_hwi (x.val[i]);
1503 return count;
1506 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1507 whether the result overflows when OP0 and OP1 are treated as having
1508 signedness SGN. Return the number of blocks in VAL. */
1509 unsigned int
1510 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1511 unsigned int op0len, const HOST_WIDE_INT *op1,
1512 unsigned int op1len, unsigned int prec,
1513 signop sgn, bool *overflow)
1515 unsigned HOST_WIDE_INT o0 = 0;
1516 unsigned HOST_WIDE_INT o1 = 0;
1517 unsigned HOST_WIDE_INT x = 0;
1518 /* We implement subtraction as an in place negate and add. Negation
1519 is just inversion and add 1, so we can do the add of 1 by just
1520 starting the borrow in of the first element at 1. */
1521 unsigned HOST_WIDE_INT borrow = 0;
1522 unsigned HOST_WIDE_INT old_borrow = 0;
1524 unsigned HOST_WIDE_INT mask0, mask1;
1525 unsigned int i;
1527 unsigned int len = MAX (op0len, op1len);
1528 mask0 = -top_bit_of (op0, op0len, prec);
1529 mask1 = -top_bit_of (op1, op1len, prec);
1531 /* Subtract all of the explicitly defined elements. */
1532 for (i = 0; i < len; i++)
1534 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1535 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1536 x = o0 - o1 - borrow;
1537 val[i] = x;
1538 old_borrow = borrow;
1539 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1542 if (len * HOST_BITS_PER_WIDE_INT < prec)
1544 val[len] = mask0 - mask1 - borrow;
1545 len++;
1546 if (overflow)
1547 *overflow = false;
1549 else if (overflow)
1551 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1552 if (sgn == SIGNED)
1554 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1555 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1557 else
 1559 /* Put the MSBs of X and O0 in the top of the HWI. */
1560 x <<= shift;
1561 o0 <<= shift;
1562 if (old_borrow)
1563 *overflow = (x >= o0);
1564 else
1565 *overflow = (x > o0);
1569 return canonize (val, len, prec);
1574 * Division and Mod
1577 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1578 algorithm is a small modification of the algorithm in Hacker's
1579 Delight by Warren, which itself is a small modification of Knuth's
 1580 algorithm. M is the number of significant elements of U (the dividend);
 1581 however, there needs to be at least one extra element of B_DIVIDEND
 1582 allocated. N is the number of elements of B_DIVISOR. */
1583 static void
1584 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1585 unsigned HOST_HALF_WIDE_INT *b_remainder,
1586 unsigned HOST_HALF_WIDE_INT *b_dividend,
1587 unsigned HOST_HALF_WIDE_INT *b_divisor,
1588 int m, int n)
1590 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1591 HOST_WIDE_INT and stored in the lower bits of each word. This
1592 algorithm should work properly on both 32 and 64 bit
1593 machines. */
1594 unsigned HOST_WIDE_INT b
1595 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1596 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1597 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1598 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1599 HOST_WIDE_INT t, k;
1600 int i, j, s;
1602 /* Single digit divisor. */
1603 if (n == 1)
1605 k = 0;
1606 for (j = m - 1; j >= 0; j--)
1608 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1609 k = ((k * b + b_dividend[j])
1610 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1611 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1613 b_remainder[0] = k;
1614 return;
1617 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1619 if (s)
1621 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1622 algorithm, we can overwrite b_dividend and b_divisor, so we do
1623 that. */
1624 for (i = n - 1; i > 0; i--)
1625 b_divisor[i] = (b_divisor[i] << s)
1626 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1627 b_divisor[0] = b_divisor[0] << s;
1629 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1630 for (i = m - 1; i > 0; i--)
1631 b_dividend[i] = (b_dividend[i] << s)
1632 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1633 b_dividend[0] = b_dividend[0] << s;
1636 /* Main loop. */
1637 for (j = m - n; j >= 0; j--)
1639 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1640 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1641 again:
1642 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1644 qhat -= 1;
1645 rhat += b_divisor[n-1];
1646 if (rhat < b)
1647 goto again;
1650 /* Multiply and subtract. */
1651 k = 0;
1652 for (i = 0; i < n; i++)
1654 p = qhat * b_divisor[i];
1655 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1656 b_dividend[i + j] = t;
1657 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1658 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1660 t = b_dividend[j+n] - k;
1661 b_dividend[j+n] = t;
1663 b_quotient[j] = qhat;
1664 if (t < 0)
1666 b_quotient[j] -= 1;
1667 k = 0;
1668 for (i = 0; i < n; i++)
1670 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1671 b_dividend[i+j] = t;
1672 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1674 b_dividend[j+n] += k;
1677 if (s)
1678 for (i = 0; i < n; i++)
1679 b_remainder[i] = (b_dividend[i] >> s)
1680 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1681 else
1682 for (i = 0; i < n; i++)
1683 b_remainder[i] = b_dividend[i];
1687 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1688 the result. If QUOTIENT is nonnull, store the value of the quotient
1689 there and return the number of blocks in it. The return value is
1690 not defined otherwise. If REMAINDER is nonnull, store the value
1691 of the remainder there and store the number of blocks in
1692 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1693 the division overflowed. */
1694 unsigned int
1695 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1696 HOST_WIDE_INT *remainder,
1697 const HOST_WIDE_INT *dividend_val,
1698 unsigned int dividend_len, unsigned int dividend_prec,
1699 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1700 unsigned int divisor_prec, signop sgn,
1701 bool *oflow)
1703 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1704 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1705 unsigned HOST_HALF_WIDE_INT
1706 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1707 unsigned HOST_HALF_WIDE_INT
1708 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1709 unsigned HOST_HALF_WIDE_INT
1710 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1711 unsigned HOST_HALF_WIDE_INT
1712 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1713 unsigned int m, n;
1714 bool dividend_neg = false;
1715 bool divisor_neg = false;
1716 bool overflow = false;
1717 wide_int neg_dividend, neg_divisor;
1719 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1720 dividend_prec);
1721 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1722 divisor_prec);
1723 if (divisor == 0)
1724 overflow = true;
1726 /* The smallest signed number / -1 causes overflow. The dividend_len
1727 check is for speed rather than correctness. */
1728 if (sgn == SIGNED
1729 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1730 && divisor == -1
1731 && wi::only_sign_bit_p (dividend))
1732 overflow = true;
1734 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
 1735 (signed min / -1) has the same representation as the original dividend.
1736 We have traditionally made division by zero act as division by one,
1737 so there too we use the original dividend. */
1738 if (overflow)
1740 if (remainder)
1742 *remainder_len = 1;
1743 remainder[0] = 0;
1745 if (oflow != 0)
1746 *oflow = true;
1747 if (quotient)
1748 for (unsigned int i = 0; i < dividend_len; ++i)
1749 quotient[i] = dividend_val[i];
1750 return dividend_len;
1753 if (oflow)
1754 *oflow = false;
1756 /* Do it on the host if you can. */
1757 if (sgn == SIGNED
1758 && wi::fits_shwi_p (dividend)
1759 && wi::fits_shwi_p (divisor))
1761 HOST_WIDE_INT o0 = dividend.to_shwi ();
1762 HOST_WIDE_INT o1 = divisor.to_shwi ();
1764 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1766 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1767 if (quotient)
1769 quotient[0] = HOST_WIDE_INT_MIN;
1770 quotient[1] = 0;
1772 if (remainder)
1774 remainder[0] = 0;
1775 *remainder_len = 1;
1777 return 2;
1779 else
1781 if (quotient)
1782 quotient[0] = o0 / o1;
1783 if (remainder)
1785 remainder[0] = o0 % o1;
1786 *remainder_len = 1;
1788 return 1;
1792 if (sgn == UNSIGNED
1793 && wi::fits_uhwi_p (dividend)
1794 && wi::fits_uhwi_p (divisor))
1796 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1797 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1799 if (quotient)
1800 quotient[0] = o0 / o1;
1801 if (remainder)
1803 remainder[0] = o0 % o1;
1804 *remainder_len = 1;
1806 return 1;
1809 /* Make the divisor and dividend positive and remember what we
1810 did. */
1811 if (sgn == SIGNED)
1813 if (wi::neg_p (dividend))
1815 neg_dividend = -dividend;
1816 dividend = neg_dividend;
1817 dividend_neg = true;
1819 if (wi::neg_p (divisor))
1821 neg_divisor = -divisor;
1822 divisor = neg_divisor;
1823 divisor_neg = true;
1827 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1828 dividend_blocks_needed, dividend_prec, sgn);
1829 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1830 divisor_blocks_needed, divisor_prec, sgn);
1832 m = dividend_blocks_needed;
1833 b_dividend[m] = 0;
1834 while (m > 1 && b_dividend[m - 1] == 0)
1835 m--;
1837 n = divisor_blocks_needed;
1838 while (n > 1 && b_divisor[n - 1] == 0)
1839 n--;
1841 memset (b_quotient, 0, sizeof (b_quotient));
1843 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1845 unsigned int quotient_len = 0;
1846 if (quotient)
1848 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1849 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
 1850 /* The quotient is negative if exactly one of the divisor or dividend is
 1851 negative. */
1852 if (dividend_neg != divisor_neg)
1853 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1854 quotient_len, dividend_prec,
1855 UNSIGNED, 0);
1858 if (remainder)
1860 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1861 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1862 /* The remainder is always the same sign as the dividend. */
1863 if (dividend_neg)
1864 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1865 *remainder_len, dividend_prec,
1866 UNSIGNED, 0);
1869 return quotient_len;
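/* Illustrative sketch, not part of the original source: truncating
   division through wi::divmod_trunc.  -7 / 2 gives quotient -3 and
   remainder -1; the remainder takes the sign of the dividend.  The
   helper name is invented and the block is compiled out.  */
#if 0
static void
example_divmod (void)
{
  wide_int a = wi::shwi (-7, 32);
  wide_int b = wi::shwi (2, 32);
  wide_int rem;
  wide_int quot = wi::divmod_trunc (a, b, SIGNED, &rem);
  gcc_assert (quot.to_shwi () == -3 && rem.to_shwi () == -1);
}
#endif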
1873 * Shifting, rotating and extraction.
1876 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1877 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1878 unsigned int
1879 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1880 unsigned int xlen, unsigned int precision,
1881 unsigned int shift)
1883 /* Split the shift into a whole-block shift and a subblock shift. */
1884 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1885 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1887 /* The whole-block shift fills with zeros. */
1888 unsigned int len = BLOCKS_NEEDED (precision);
1889 for (unsigned int i = 0; i < skip; ++i)
1890 val[i] = 0;
1892 /* It's easier to handle the simple block case specially. */
1893 if (small_shift == 0)
1894 for (unsigned int i = skip; i < len; ++i)
1895 val[i] = safe_uhwi (xval, xlen, i - skip);
1896 else
1898 /* The first unfilled output block is a left shift of the first
1899 block in XVAL. The other output blocks contain bits from two
1900 consecutive input blocks. */
1901 unsigned HOST_WIDE_INT carry = 0;
1902 for (unsigned int i = skip; i < len; ++i)
1904 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1905 val[i] = (x << small_shift) | carry;
1906 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1909 return canonize (val, len, precision);
1912 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1913 number of blocks in VAL. The input has XPRECISION bits and the
1914 output has XPRECISION - SHIFT bits. */
1915 static unsigned int
1916 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1917 unsigned int xlen, unsigned int xprecision,
1918 unsigned int shift)
1920 /* Split the shift into a whole-block shift and a subblock shift. */
1921 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1922 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1924 /* Work out how many blocks are needed to store the significant bits
1925 (excluding the upper zeros or signs). */
1926 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1928 /* It's easier to handle the simple block case specially. */
1929 if (small_shift == 0)
1930 for (unsigned int i = 0; i < len; ++i)
1931 val[i] = safe_uhwi (xval, xlen, i + skip);
1932 else
1934 /* Each output block but the last is a combination of two input blocks.
1935 The last block is a right shift of the last block in XVAL. */
1936 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1937 for (unsigned int i = 0; i < len; ++i)
1939 val[i] = curr >> small_shift;
1940 curr = safe_uhwi (xval, xlen, i + skip + 1);
1941 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1944 return len;
1947 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1948 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1949 VAL has PRECISION bits. */
1950 unsigned int
1951 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1952 unsigned int xlen, unsigned int xprecision,
1953 unsigned int precision, unsigned int shift)
1955 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1957 /* The value we just created has precision XPRECISION - SHIFT.
1958 Zero-extend it to wider precisions. */
1959 if (precision > xprecision - shift)
1961 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1962 if (small_prec)
1963 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1964 else if (val[len - 1] < 0)
1966 /* Add a new block with a zero. */
1967 val[len++] = 0;
1968 return len;
1971 return canonize (val, len, precision);
1974 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1975 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1976 VAL has PRECISION bits. */
1977 unsigned int
1978 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1979 unsigned int xlen, unsigned int xprecision,
1980 unsigned int precision, unsigned int shift)
1982 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1984 /* The value we just created has precision XPRECISION - SHIFT.
1985 Sign-extend it to wider types. */
1986 if (precision > xprecision - shift)
1988 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1989 if (small_prec)
1990 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1992 return canonize (val, len, precision);
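/* Illustrative sketch, not part of the original source: the three shift
   flavours on a negative 32-bit value.  The helper name is invented and
   the block is compiled out.  */
#if 0
static void
example_shifts (void)
{
  wide_int x = wi::shwi (-16, 32);		/* 0xfffffff0 */
  gcc_assert (wi::lshift (x, 1).to_shwi () == -32);
  gcc_assert (wi::arshift (x, 2).to_shwi () == -4);
  gcc_assert (wi::lrshift (x, 28).to_uhwi () == 0xf);
}
#endif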
1995 /* Return the number of leading (upper) zeros in X. */
 1996 int
 1997 wi::clz (const wide_int_ref &x)
 1999 /* Calculate how many bits there are above the highest represented block. */
2000 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2002 unsigned HOST_WIDE_INT high = x.uhigh ();
2003 if (count < 0)
2004 /* The upper -COUNT bits of HIGH are not part of the value.
2005 Clear them out. */
2006 high = (high << -count) >> -count;
2007 else if (x.sign_mask () < 0)
2008 /* The upper bit is set, so there are no leading zeros. */
2009 return 0;
2011 /* We don't need to look below HIGH. Either HIGH is nonzero,
2012 or the top bit of the block below is nonzero; clz_hwi is
2013 HOST_BITS_PER_WIDE_INT in the latter case. */
2014 return count + clz_hwi (high);
2017 /* Return the number of redundant sign bits in X. (That is, the number
2018 of bits immediately below the sign bit that have the same value as
2019 the sign bit.) */
 2020 int
 2021 wi::clrsb (const wide_int_ref &x)
 2023 /* Calculate how many bits there are above the highest represented block. */
2024 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2026 unsigned HOST_WIDE_INT high = x.uhigh ();
2027 unsigned HOST_WIDE_INT mask = -1;
2028 if (count < 0)
2030 /* The upper -COUNT bits of HIGH are not part of the value.
2031 Clear them from both MASK and HIGH. */
2032 mask >>= -count;
2033 high &= mask;
2036 /* If the top bit is 1, count the number of leading 1s. If the top
2037 bit is zero, count the number of leading zeros. */
2038 if (high > mask / 2)
2039 high ^= mask;
2041 /* There are no sign bits below the top block, so we don't need to look
2042 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2043 HIGH is 0. */
2044 return count + clz_hwi (high) - 1;
2047 /* Return the number of trailing (lower) zeros in X. */
 2048 int
 2049 wi::ctz (const wide_int_ref &x)
2051 if (x.len == 1 && x.ulow () == 0)
2052 return x.precision;
2054 /* Having dealt with the zero case, there must be a block with a
2055 nonzero bit. We don't care about the bits above the first 1. */
2056 unsigned int i = 0;
2057 while (x.val[i] == 0)
2058 ++i;
2059 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2062 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2063 return -1. */
 2064 int
 2065 wi::exact_log2 (const wide_int_ref &x)
2067 /* Reject cases where there are implicit -1 blocks above HIGH. */
2068 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2069 return -1;
2071 /* Set CRUX to the index of the entry that should be nonzero.
2072 If the top block is zero then the next lowest block (if any)
2073 must have the high bit set. */
2074 unsigned int crux = x.len - 1;
2075 if (crux > 0 && x.val[crux] == 0)
2076 crux -= 1;
2078 /* Check that all lower blocks are zero. */
2079 for (unsigned int i = 0; i < crux; ++i)
2080 if (x.val[i] != 0)
2081 return -1;
2083 /* Get a zero-extended form of block CRUX. */
2084 unsigned HOST_WIDE_INT hwi = x.val[crux];
2085 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2086 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2088 /* Now it's down to whether HWI is a power of 2. */
2089 int res = ::exact_log2 (hwi);
2090 if (res >= 0)
2091 res += crux * HOST_BITS_PER_WIDE_INT;
2092 return res;
2095 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
 2096 int
 2097 wi::floor_log2 (const wide_int_ref &x)
2099 return x.precision - 1 - clz (x);
2102 /* Return the index of the first (lowest) set bit in X, counting from 1.
2103 Return 0 if X is 0. */
 2104 int
 2105 wi::ffs (const wide_int_ref &x)
2107 return eq_p (x, 0) ? 0 : ctz (x) + 1;
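/* Illustrative sketch, not part of the original source: the bit-scan
   routines above applied to 0x50 (binary 1010000) at 32-bit precision.
   The helper name is invented and the block is compiled out.  */
#if 0
static void
example_bit_scans (void)
{
  wide_int x = wi::shwi (0x50, 32);
  gcc_assert (wi::popcount (x) == 2);
  gcc_assert (wi::clz (x) == 25);
  gcc_assert (wi::ctz (x) == 4);
  gcc_assert (wi::floor_log2 (x) == 6);
  gcc_assert (wi::exact_log2 (x) == -1);
  gcc_assert (wi::ffs (x) == 5);
}
#endif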
2110 /* Return true if sign-extending X to have precision PRECISION would give
2111 the minimum signed value at that precision. */
2112 bool
2113 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2115 return ctz (x) + 1 == int (precision);
2118 /* Return true if X represents the minimum signed value. */
2119 bool
2120 wi::only_sign_bit_p (const wide_int_ref &x)
2122 return only_sign_bit_p (x, x.precision);
2126 * Private utilities.
2129 void gt_ggc_mx (widest_int *) { }
2130 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2131 void gt_pch_nx (widest_int *) { }
2133 template void wide_int::dump () const;
2134 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2135 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2136 template void offset_int::dump () const;
2137 template void widest_int::dump () const;