[official-gcc.git] / gcc / wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
28 #define HOST_BITS_PER_HALF_WIDE_INT 32
29 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
30 # define HOST_HALF_WIDE_INT long
31 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
32 # define HOST_HALF_WIDE_INT int
33 #else
34 #error Please add support for HOST_HALF_WIDE_INT
35 #endif
37 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
38 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
39 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
40 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
41 typedef unsigned HOST_WIDE_INT UWtype;
42 typedef unsigned int UQItype __attribute__ ((mode (QI)));
43 typedef unsigned int USItype __attribute__ ((mode (SI)));
44 typedef unsigned int UDItype __attribute__ ((mode (DI)));
45 #if W_TYPE_SIZE == 32
46 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
47 #else
48 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
49 #endif
50 #include "longlong.h"
51 #endif
53 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
56 * Internal utilities.
59 /* Quantities to deal with values that hold half of a wide int. Used
60 in multiply and divide. */
61 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
64 #define BLOCKS_NEEDED(PREC) \
65 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
66 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
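/* Editorial sketch, not part of GCC's wide-int.cc: the sext_hwi and
   zext_hwi helpers used throughout this file are defined elsewhere in
   GCC.  For 0 < PREC < 64 and a 64-bit HOST_WIDE_INT they behave like
   the hypothetical functions below (assuming the usual arithmetic
   right shift of signed values).  */
static long long
sketch_sext_hwi (long long x, unsigned int prec)
{
  unsigned int shift = 64 - prec;
  /* Push the value's bit PREC - 1 into the sign position, then shift
     back down, replicating it across the upper bits.  */
  return (long long) ((unsigned long long) x << shift) >> shift;
}

static unsigned long long
sketch_zext_hwi (unsigned long long x, unsigned int prec)
{
  /* Keep only the low PREC bits.  */
  return x & (((unsigned long long) 1 << prec) - 1);
}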
68 /* Return the value of VAL[I] if I < LEN; otherwise return 0 or -1
69 based on the top existing bit of VAL. */
71 static unsigned HOST_WIDE_INT
72 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
77 /* Convert the integer in VAL to canonical form, returning its new length.
78 LEN is the number of blocks currently in VAL and PRECISION is the number
79 of bits in the integer it represents.
81 This function only changes the representation, not the value. */
82 static unsigned int
83 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 HOST_WIDE_INT top;
87 int i;
89 if (len > blocks_needed)
90 len = blocks_needed;
92 if (len == 1)
93 return len;
95 top = val[len - 1];
96 if (len * HOST_BITS_PER_WIDE_INT > precision)
97 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 if (top != 0 && top != (HOST_WIDE_INT)-1)
99 return len;
101 /* At this point we know that the top is either 0 or -1. Find the
102 first block that is not a copy of this. */
103 for (i = len - 2; i >= 0; i--)
105 HOST_WIDE_INT x = val[i];
106 if (x != top)
108 if (SIGN_MASK (x) == top)
109 return i + 1;
111 /* We need an extra block because the top bit of block I does
112 not match the extension. */
113 return i + 2;
117 /* The number is 0 or -1. */
118 return 1;
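/* Editorial sketch, not part of GCC's wide-int.cc: a stand-alone
   two-block illustration of the canonical form enforced above,
   assuming 64-bit blocks stored least significant first.  For example
   { -1, -1 } (the value -1) canonizes to the single block { -1 },
   while { -1, 0 } (the positive value 2^64 - 1) must keep both
   blocks.  */
static unsigned int
sketch_canonize_2 (const long long val[2])
{
  /* If the high block is merely the sign extension of the low block,
     it is redundant; otherwise both blocks are significant.  */
  long long ext = val[0] < 0 ? -1 : 0;
  return val[1] == ext ? 1 : 2;
}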
122 * Conversion routines in and out of wide_int.
125 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
126 result for an integer with precision PRECISION. Return the length
127 of VAL (after any canonization). */
128 unsigned int
129 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
130 unsigned int xlen, unsigned int precision, bool need_canon)
132 for (unsigned i = 0; i < xlen; i++)
133 val[i] = xval[i];
134 return need_canon ? canonize (val, xlen, precision) : xlen;
137 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
138 read according to the byte and word endianness of the target.
139 Only the lower BUFFER_LEN bytes of the result are set; the remaining
140 high bytes are cleared. */
141 wide_int
142 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
144 unsigned int precision = buffer_len * BITS_PER_UNIT;
145 wide_int result = wide_int::create (precision);
146 unsigned int words = buffer_len / UNITS_PER_WORD;
148 /* We have to clear all the bits ourselves, as we merely or in values
149 below. */
150 unsigned int len = BLOCKS_NEEDED (precision);
151 HOST_WIDE_INT *val = result.write_val ();
152 for (unsigned int i = 0; i < len; ++i)
153 val[i] = 0;
155 for (unsigned int byte = 0; byte < buffer_len; byte++)
157 unsigned int offset;
158 unsigned int index;
159 unsigned int bitpos = byte * BITS_PER_UNIT;
160 unsigned HOST_WIDE_INT value;
162 if (buffer_len > UNITS_PER_WORD)
164 unsigned int word = byte / UNITS_PER_WORD;
166 if (WORDS_BIG_ENDIAN)
167 word = (words - 1) - word;
169 offset = word * UNITS_PER_WORD;
171 if (BYTES_BIG_ENDIAN)
172 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
173 else
174 offset += byte % UNITS_PER_WORD;
176 else
177 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
179 value = (unsigned HOST_WIDE_INT) buffer[offset];
181 index = bitpos / HOST_BITS_PER_WIDE_INT;
182 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
185 result.set_len (canonize (val, len, precision));
187 return result;
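/* Editorial sketch, not part of GCC's wide-int.cc: the byte placement
   performed above, reduced to the little-endian case with a buffer no
   larger than one 64-bit block.  Byte BYTE of the buffer lands at bit
   position BYTE * 8 of the block.  */
static unsigned long long
sketch_bytes_to_block (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned long long block = 0;
  for (unsigned int byte = 0; byte < buffer_len && byte < 8; byte++)
    block |= (unsigned long long) buffer[byte] << (byte * 8);
  return block;
}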
190 /* Set RESULT from X; the sign is taken according to SGN. */
191 void
192 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
194 int len = x.get_len ();
195 const HOST_WIDE_INT *v = x.get_val ();
196 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
198 if (wi::neg_p (x, sgn))
200 /* We use ones' complement to avoid the 0x80..0 edge case that
201 unary minus won't work on. */
202 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
203 for (int i = 0; i < len; i++)
204 t[i] = ~v[i];
205 if (excess > 0)
206 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
207 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
208 mpz_com (result, result);
210 else if (excess > 0)
212 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
213 for (int i = 0; i < len - 1; i++)
214 t[i] = v[i];
215 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
216 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
218 else
219 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
222 /* Return X converted to TYPE. If WRAP is true, then out-of-range
223 values of X will be wrapped; otherwise, they will be set to the
224 appropriate minimum or maximum TYPE bound. */
225 wide_int
226 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
228 size_t count, numb;
229 unsigned int prec = TYPE_PRECISION (type);
230 wide_int res = wide_int::create (prec);
232 if (!wrap)
234 mpz_t min, max;
236 mpz_init (min);
237 mpz_init (max);
238 get_type_static_bounds (type, min, max);
240 if (mpz_cmp (x, min) < 0)
241 mpz_set (x, min);
242 else if (mpz_cmp (x, max) > 0)
243 mpz_set (x, max);
245 mpz_clear (min);
246 mpz_clear (max);
249 /* Determine the number of unsigned HOST_WIDE_INTs that are required
250 for representing the absolute value. The code to calculate count is
251 extracted from the GMP manual, section "Integer Import and Export":
252 http://gmplib.org/manual/Integer-Import-and-Export.html */
253 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
254 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
255 HOST_WIDE_INT *val = res.write_val ();
256 /* Read the absolute value.
258 Write directly to the wide_int storage if possible, otherwise leave
259 GMP to allocate the memory for us. It might be slightly more efficient
260 to use mpz_tdiv_r_2exp for the latter case, but the situation is
261 pathological and it seems safer to operate on the original mpz value
262 in all cases. */
263 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
264 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
265 if (count < 1)
267 val[0] = 0;
268 count = 1;
270 count = MIN (count, BLOCKS_NEEDED (prec));
271 if (valres != val)
273 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
274 free (valres);
276 /* Zero-extend the absolute value to PREC bits. */
277 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
278 val[count++] = 0;
279 else
280 count = canonize (val, count, prec);
281 res.set_len (count);
283 if (mpz_sgn (x) < 0)
284 res = -res;
286 return res;
290 * Largest and smallest values in a mode.
293 /* Return the largest SGNed number that is representable in PRECISION bits.
295 TODO: There is still code from the double_int era that tries to
296 make up for the fact that double_ints could not represent the
297 min and max values of all types. This code should be removed
298 because the min and max values can always be represented in
299 wide_ints and int-csts. */
300 wide_int
301 wi::max_value (unsigned int precision, signop sgn)
303 gcc_checking_assert (precision != 0);
304 if (sgn == UNSIGNED)
305 /* The unsigned max is just all ones. */
306 return shwi (-1, precision);
307 else
308 /* The signed max is all ones except the top bit. This must be
309 explicitly represented. */
310 return mask (precision - 1, false, precision);
313 /* Return the smallest SGNed number that is representable in PRECISION bits. */
314 wide_int
315 wi::min_value (unsigned int precision, signop sgn)
317 gcc_checking_assert (precision != 0);
318 if (sgn == UNSIGNED)
319 return uhwi (0, precision);
320 else
321 /* The signed min is all zeros except the top bit. This must be
322 explicitly represented. */
323 return wi::set_bit_in_zero (precision - 1, precision);
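/* Editorial sketch, not part of GCC's wide-int.cc: the bit patterns
   produced by the two functions above, for precisions between 1 and
   64 bits, written with plain 64-bit arithmetic.  The signed maximum
   is PRECISION - 1 ones; the signed minimum is a single one bit at
   position PRECISION - 1, sign-extended.  */
static long long
sketch_smax (unsigned int precision)
{
  /* 0b0111...1 with PRECISION - 1 low ones.  */
  return (long long) (((unsigned long long) 1 << (precision - 1)) - 1);
}

static long long
sketch_smin (unsigned int precision)
{
  /* 0b1000...0, i.e. -(2^(PRECISION - 1)).  */
  return -sketch_smax (precision) - 1;
}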
327 * Public utilities.
330 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
331 signedness SGN, to an integer that has PRECISION bits. Store the blocks
332 in VAL and return the number of blocks used.
334 This function can handle both extension (PRECISION > XPRECISION)
335 and truncation (PRECISION < XPRECISION). */
336 unsigned int
337 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
338 unsigned int xlen, unsigned int xprecision,
339 unsigned int precision, signop sgn)
341 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
342 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
343 for (unsigned i = 0; i < len; i++)
344 val[i] = xval[i];
346 if (precision > xprecision)
348 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
350 /* Expanding. */
351 if (sgn == UNSIGNED)
353 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
354 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
355 else if (val[len - 1] < 0)
357 while (len < BLOCKS_NEEDED (xprecision))
358 val[len++] = -1;
359 if (small_xprecision)
360 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
361 else
362 val[len++] = 0;
365 else
367 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
368 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
371 len = canonize (val, len, precision);
373 return len;
376 /* This function hides the fact that we cannot rely on the bits beyond
377 the precision. This issue comes up in the relational comparisons
378 where we do allow comparisons of values of different precisions. */
379 static inline HOST_WIDE_INT
380 selt (const HOST_WIDE_INT *a, unsigned int len,
381 unsigned int blocks_needed, unsigned int small_prec,
382 unsigned int index, signop sgn)
384 HOST_WIDE_INT val;
385 if (index < len)
386 val = a[index];
387 else if (index < blocks_needed || sgn == SIGNED)
388 /* Signed or within the precision. */
389 val = SIGN_MASK (a[len - 1]);
390 else
391 /* Unsigned extension beyond the precision. */
392 val = 0;
394 if (small_prec && index == blocks_needed - 1)
395 return (sgn == SIGNED
396 ? sext_hwi (val, small_prec)
397 : zext_hwi (val, small_prec));
398 else
399 return val;
402 /* Find the highest bit represented in a wide int. This will in
403 general have the same value as the sign bit. */
404 static inline HOST_WIDE_INT
405 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
407 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
408 unsigned HOST_WIDE_INT val = a[len - 1];
409 if (excess > 0)
410 val <<= excess;
411 return val >> (HOST_BITS_PER_WIDE_INT - 1);
415 * Comparisons. Note that only equality is an operator. The other
416 * comparisons cannot be operators since they are inherently signed or
417 * unsigned and C++ has no such operators.
420 /* Return true if OP0 == OP1. */
421 bool
422 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
423 const HOST_WIDE_INT *op1, unsigned int op1len,
424 unsigned int prec)
426 int l0 = op0len - 1;
427 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
429 if (op0len != op1len)
430 return false;
432 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
434 /* It does not matter if we zext or sext here, we just have to
435 do both the same way. */
436 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
437 return false;
438 l0--;
441 while (l0 >= 0)
442 if (op0[l0] != op1[l0])
443 return false;
444 else
445 l0--;
447 return true;
450 /* Return true if OP0 < OP1 using signed comparisons. */
451 bool
452 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
453 unsigned int precision,
454 const HOST_WIDE_INT *op1, unsigned int op1len)
456 HOST_WIDE_INT s0, s1;
457 unsigned HOST_WIDE_INT u0, u1;
458 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
459 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
460 int l = MAX (op0len - 1, op1len - 1);
462 /* Only the top block is compared as signed. The rest are unsigned
463 comparisons. */
464 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
465 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
466 if (s0 < s1)
467 return true;
468 if (s0 > s1)
469 return false;
471 l--;
472 while (l >= 0)
474 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
475 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
477 if (u0 < u1)
478 return true;
479 if (u0 > u1)
480 return false;
481 l--;
484 return false;
487 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
488 signed compares. */
490 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
491 unsigned int precision,
492 const HOST_WIDE_INT *op1, unsigned int op1len)
494 HOST_WIDE_INT s0, s1;
495 unsigned HOST_WIDE_INT u0, u1;
496 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
497 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
498 int l = MAX (op0len - 1, op1len - 1);
500 /* Only the top block is compared as signed. The rest are unsigned
501 comparisons. */
502 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
503 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
504 if (s0 < s1)
505 return -1;
506 if (s0 > s1)
507 return 1;
509 l--;
510 while (l >= 0)
512 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
513 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
515 if (u0 < u1)
516 return -1;
517 if (u0 > u1)
518 return 1;
519 l--;
522 return 0;
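/* Editorial sketch, not part of GCC's wide-int.cc: the comparison
   scheme used by lts_p_large and cmps_large above, reduced to exactly
   two 64-bit blocks stored least significant first.  Only the top
   block is compared as signed; the lower block is compared as
   unsigned.  */
static int
sketch_cmps_2 (const long long a[2], const long long b[2])
{
  if (a[1] != b[1])
    return a[1] < b[1] ? -1 : 1;
  unsigned long long ua = (unsigned long long) a[0];
  unsigned long long ub = (unsigned long long) b[0];
  if (ua != ub)
    return ua < ub ? -1 : 1;
  return 0;
}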
525 /* Return true if OP0 < OP1 using unsigned comparisons. */
526 bool
527 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
528 unsigned int precision,
529 const HOST_WIDE_INT *op1, unsigned int op1len)
531 unsigned HOST_WIDE_INT x0;
532 unsigned HOST_WIDE_INT x1;
533 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
534 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
535 int l = MAX (op0len - 1, op1len - 1);
537 while (l >= 0)
539 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
540 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
541 if (x0 < x1)
542 return true;
543 if (x0 > x1)
544 return false;
545 l--;
548 return false;
551 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
552 unsigned compares. */
554 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
555 unsigned int precision,
556 const HOST_WIDE_INT *op1, unsigned int op1len)
558 unsigned HOST_WIDE_INT x0;
559 unsigned HOST_WIDE_INT x1;
560 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
561 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
562 int l = MAX (op0len - 1, op1len - 1);
564 while (l >= 0)
566 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
567 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
568 if (x0 < x1)
569 return -1;
570 if (x0 > x1)
571 return 1;
572 l--;
575 return 0;
579 * Extension.
582 /* Sign-extend the number represented by XVAL and XLEN into VAL,
583 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
584 and VAL have PRECISION bits. */
585 unsigned int
586 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
587 unsigned int xlen, unsigned int precision, unsigned int offset)
589 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
590 /* Extending beyond the precision is a no-op. If we have only stored
591 OFFSET bits or fewer, the rest are already signs. */
592 if (offset >= precision || len >= xlen)
594 for (unsigned i = 0; i < xlen; ++i)
595 val[i] = xval[i];
596 return xlen;
598 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
599 for (unsigned int i = 0; i < len; i++)
600 val[i] = xval[i];
601 if (suboffset > 0)
603 val[len] = sext_hwi (xval[len], suboffset);
604 len += 1;
606 return canonize (val, len, precision);
609 /* Zero-extend the number represented by XVAL and XLEN into VAL,
610 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
611 and VAL have PRECISION bits. */
612 unsigned int
613 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
614 unsigned int xlen, unsigned int precision, unsigned int offset)
616 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
617 /* Extending beyond the precision is a no-op. If we have only stored
618 OFFSET bits or fewer, and the upper stored bit is zero, then there
619 is nothing to do. */
620 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
622 for (unsigned i = 0; i < xlen; ++i)
623 val[i] = xval[i];
624 return xlen;
626 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
627 for (unsigned int i = 0; i < len; i++)
628 val[i] = i < xlen ? xval[i] : -1;
629 if (suboffset > 0)
630 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
631 else
632 val[len] = 0;
633 return canonize (val, len + 1, precision);
637 * Masking, inserting, shifting, rotating.
640 /* Insert WIDTH bits from Y into X starting at START. */
641 wide_int
642 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
643 unsigned int width)
645 wide_int result;
646 wide_int mask;
647 wide_int tmp;
649 unsigned int precision = x.get_precision ();
650 if (start >= precision)
651 return x;
653 gcc_checking_assert (precision >= width);
655 if (start + width >= precision)
656 width = precision - start;
658 mask = wi::shifted_mask (start, width, false, precision);
659 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
660 result = tmp & mask;
662 tmp = wi::bit_and_not (x, mask);
663 result = result | tmp;
665 return result;
668 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
669 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
670 bits. */
671 unsigned int
672 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
673 unsigned int xlen, unsigned int precision, unsigned int bit)
675 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
676 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
678 if (block + 1 >= xlen)
680 /* The operation either affects the last current block or needs
681 a new block. */
682 unsigned int len = block + 1;
683 for (unsigned int i = 0; i < len; i++)
684 val[i] = safe_uhwi (xval, xlen, i);
685 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
687 /* If the bit we just set is at the msb of the block, make sure
688 that any higher bits are zeros. */
689 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
690 val[len++] = 0;
691 return len;
693 else
695 for (unsigned int i = 0; i < xlen; i++)
696 val[i] = xval[i];
697 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
698 return canonize (val, xlen, precision);
702 /* bswap THIS. */
703 wide_int
704 wide_int_storage::bswap () const
706 wide_int result = wide_int::create (precision);
707 unsigned int i, s;
708 unsigned int len = BLOCKS_NEEDED (precision);
709 unsigned int xlen = get_len ();
710 const HOST_WIDE_INT *xval = get_val ();
711 HOST_WIDE_INT *val = result.write_val ();
713 /* This is not a well defined operation if the precision is not a
714 multiple of 8. */
715 gcc_assert ((precision & 0x7) == 0);
717 for (i = 0; i < len; i++)
718 val[i] = 0;
720 /* Only swap the bytes that are not the padding. */
721 for (s = 0; s < precision; s += 8)
723 unsigned int d = precision - s - 8;
724 unsigned HOST_WIDE_INT byte;
726 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
727 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
729 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
731 block = d / HOST_BITS_PER_WIDE_INT;
732 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
734 val[block] |= byte << offset;
737 result.set_len (canonize (val, len, precision));
738 return result;
741 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
742 above that up to PREC are zeros. The result is inverted if NEGATE
743 is true. Return the number of blocks in VAL. */
744 unsigned int
745 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
746 unsigned int prec)
748 if (width >= prec)
750 val[0] = negate ? 0 : -1;
751 return 1;
753 else if (width == 0)
755 val[0] = negate ? -1 : 0;
756 return 1;
759 unsigned int i = 0;
760 while (i < width / HOST_BITS_PER_WIDE_INT)
761 val[i++] = negate ? 0 : -1;
763 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
764 if (shift != 0)
766 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
767 val[i++] = negate ? ~last : last;
769 else
770 val[i++] = negate ? -1 : 0;
772 return i;
775 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
776 bits are ones, and the bits above that up to PREC are zeros. The result
777 is inverted if NEGATE is true. Return the number of blocks in VAL. */
778 unsigned int
779 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
780 bool negate, unsigned int prec)
782 if (start >= prec || width == 0)
784 val[0] = negate ? -1 : 0;
785 return 1;
788 if (width > prec - start)
789 width = prec - start;
790 unsigned int end = start + width;
792 unsigned int i = 0;
793 while (i < start / HOST_BITS_PER_WIDE_INT)
794 val[i++] = negate ? -1 : 0;
796 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
797 if (shift)
799 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
800 shift += width;
801 if (shift < HOST_BITS_PER_WIDE_INT)
803 /* case 000111000 */
804 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
805 val[i++] = negate ? ~block : block;
806 return i;
808 else
809 /* ...111000 */
810 val[i++] = negate ? block : ~block;
813 while (i < end / HOST_BITS_PER_WIDE_INT)
814 /* 1111111 */
815 val[i++] = negate ? 0 : -1;
817 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
818 if (shift != 0)
820 /* 000011111 */
821 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
822 val[i++] = negate ? ~block : block;
824 else if (end < prec)
825 val[i++] = negate ? -1 : 0;
827 return i;
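/* Editorial sketch, not part of GCC's wide-int.cc: the single-block
   analogue of the mask builders above, for START < 64 and
   START + WIDTH <= 64: WIDTH ones starting at bit START, zeros
   elsewhere.  */
static unsigned long long
sketch_shifted_mask (unsigned int start, unsigned int width)
{
  unsigned long long low = width < 64
			   ? ((unsigned long long) 1 << width) - 1
			   : ~(unsigned long long) 0;
  return low << start;
}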
831 * Logical operations.
834 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
835 unsigned int
836 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
837 unsigned int op0len, const HOST_WIDE_INT *op1,
838 unsigned int op1len, unsigned int prec)
840 int l0 = op0len - 1;
841 int l1 = op1len - 1;
842 bool need_canon = true;
844 unsigned int len = MAX (op0len, op1len);
845 if (l0 > l1)
847 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
848 if (op1mask == 0)
850 l0 = l1;
851 len = l1 + 1;
853 else
855 need_canon = false;
856 while (l0 > l1)
858 val[l0] = op0[l0];
859 l0--;
863 else if (l1 > l0)
865 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
866 if (op0mask == 0)
867 len = l0 + 1;
868 else
870 need_canon = false;
871 while (l1 > l0)
873 val[l1] = op1[l1];
874 l1--;
879 while (l0 >= 0)
881 val[l0] = op0[l0] & op1[l0];
882 l0--;
885 if (need_canon)
886 len = canonize (val, len, prec);
888 return len;
891 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
892 unsigned int
893 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
894 unsigned int op0len, const HOST_WIDE_INT *op1,
895 unsigned int op1len, unsigned int prec)
897 wide_int result;
898 int l0 = op0len - 1;
899 int l1 = op1len - 1;
900 bool need_canon = true;
902 unsigned int len = MAX (op0len, op1len);
903 if (l0 > l1)
905 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
906 if (op1mask != 0)
908 l0 = l1;
909 len = l1 + 1;
911 else
913 need_canon = false;
914 while (l0 > l1)
916 val[l0] = op0[l0];
917 l0--;
921 else if (l1 > l0)
923 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
924 if (op0mask == 0)
925 len = l0 + 1;
926 else
928 need_canon = false;
929 while (l1 > l0)
931 val[l1] = ~op1[l1];
932 l1--;
937 while (l0 >= 0)
939 val[l0] = op0[l0] & ~op1[l0];
940 l0--;
943 if (need_canon)
944 len = canonize (val, len, prec);
946 return len;
949 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
950 unsigned int
951 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
952 unsigned int op0len, const HOST_WIDE_INT *op1,
953 unsigned int op1len, unsigned int prec)
955 wide_int result;
956 int l0 = op0len - 1;
957 int l1 = op1len - 1;
958 bool need_canon = true;
960 unsigned int len = MAX (op0len, op1len);
961 if (l0 > l1)
963 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
964 if (op1mask != 0)
966 l0 = l1;
967 len = l1 + 1;
969 else
971 need_canon = false;
972 while (l0 > l1)
974 val[l0] = op0[l0];
975 l0--;
979 else if (l1 > l0)
981 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
982 if (op0mask != 0)
983 len = l0 + 1;
984 else
986 need_canon = false;
987 while (l1 > l0)
989 val[l1] = op1[l1];
990 l1--;
995 while (l0 >= 0)
997 val[l0] = op0[l0] | op1[l0];
998 l0--;
1001 if (need_canon)
1002 len = canonize (val, len, prec);
1004 return len;
1007 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1008 unsigned int
1009 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1010 unsigned int op0len, const HOST_WIDE_INT *op1,
1011 unsigned int op1len, unsigned int prec)
1013 wide_int result;
1014 int l0 = op0len - 1;
1015 int l1 = op1len - 1;
1016 bool need_canon = true;
1018 unsigned int len = MAX (op0len, op1len);
1019 if (l0 > l1)
1021 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1022 if (op1mask == 0)
1024 l0 = l1;
1025 len = l1 + 1;
1027 else
1029 need_canon = false;
1030 while (l0 > l1)
1032 val[l0] = op0[l0];
1033 l0--;
1037 else if (l1 > l0)
1039 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1040 if (op0mask != 0)
1041 len = l0 + 1;
1042 else
1044 need_canon = false;
1045 while (l1 > l0)
1047 val[l1] = ~op1[l1];
1048 l1--;
1053 while (l0 >= 0)
1055 val[l0] = op0[l0] | ~op1[l0];
1056 l0--;
1059 if (need_canon)
1060 len = canonize (val, len, prec);
1062 return len;
1065 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1066 unsigned int
1067 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1068 unsigned int op0len, const HOST_WIDE_INT *op1,
1069 unsigned int op1len, unsigned int prec)
1071 wide_int result;
1072 int l0 = op0len - 1;
1073 int l1 = op1len - 1;
1075 unsigned int len = MAX (op0len, op1len);
1076 if (l0 > l1)
1078 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1079 while (l0 > l1)
1081 val[l0] = op0[l0] ^ op1mask;
1082 l0--;
1086 if (l1 > l0)
1088 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1089 while (l1 > l0)
1091 val[l1] = op0mask ^ op1[l1];
1092 l1--;
1096 while (l0 >= 0)
1098 val[l0] = op0[l0] ^ op1[l0];
1099 l0--;
1102 return canonize (val, len, prec);
1106 * Math.
1109 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1110 whether the result overflows when OP0 and OP1 are treated as having
1111 signedness SGN. Return the number of blocks in VAL. */
1112 unsigned int
1113 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1114 unsigned int op0len, const HOST_WIDE_INT *op1,
1115 unsigned int op1len, unsigned int prec,
1116 signop sgn, bool *overflow)
1118 unsigned HOST_WIDE_INT o0 = 0;
1119 unsigned HOST_WIDE_INT o1 = 0;
1120 unsigned HOST_WIDE_INT x = 0;
1121 unsigned HOST_WIDE_INT carry = 0;
1122 unsigned HOST_WIDE_INT old_carry = 0;
1123 unsigned HOST_WIDE_INT mask0, mask1;
1124 unsigned int i;
1126 unsigned int len = MAX (op0len, op1len);
1127 mask0 = -top_bit_of (op0, op0len, prec);
1128 mask1 = -top_bit_of (op1, op1len, prec);
1129 /* Add all of the explicitly defined elements. */
1131 for (i = 0; i < len; i++)
1133 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1134 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1135 x = o0 + o1 + carry;
1136 val[i] = x;
1137 old_carry = carry;
1138 carry = carry == 0 ? x < o0 : x <= o0;
1141 if (len * HOST_BITS_PER_WIDE_INT < prec)
1143 val[len] = mask0 + mask1 + carry;
1144 len++;
1145 if (overflow)
1146 *overflow = false;
1148 else if (overflow)
1150 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1151 if (sgn == SIGNED)
1153 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1154 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1156 else
1158 /* Put the MSBs of X and O0 in the top of the HWI. */
1159 x <<= shift;
1160 o0 <<= shift;
1161 if (old_carry)
1162 *overflow = (x <= o0);
1163 else
1164 *overflow = (x < o0);
1168 return canonize (val, len, prec);
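/* Editorial sketch, not part of GCC's wide-int.cc: the per-block carry
   computation and the signed overflow test used above, reduced to a
   single 64-bit block.  (In add_large the shift by -prec %
   HOST_BITS_PER_WIDE_INT first moves the value's sign bit to the top
   of the block; here the block's own top bit plays that role.)
   Signed overflow occurs iff both operands have the same sign and the
   result's sign differs, which is what (x ^ a) & (x ^ b) tests.  */
static unsigned long long
sketch_add_block (unsigned long long a, unsigned long long b,
		  unsigned long long carry_in,
		  unsigned long long *carry_out, int *signed_overflow)
{
  unsigned long long x = a + b + carry_in;
  /* Same trick as above: with a carry-in the sum can equal A even
     though a carry out occurred, hence the <= form.  */
  *carry_out = carry_in == 0 ? x < a : x <= a;
  *signed_overflow = (long long) ((x ^ a) & (x ^ b)) < 0;
  return x;
}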
1171 /* Subroutines of the multiplication and division operations. Unpack
1172 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1173 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1174 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1175 static void
1176 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1177 unsigned int in_len, unsigned int out_len,
1178 unsigned int prec, signop sgn)
1180 unsigned int i;
1181 unsigned int j = 0;
1182 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1183 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1184 HOST_WIDE_INT mask;
1186 if (sgn == SIGNED)
1188 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1189 mask &= HALF_INT_MASK;
1191 else
1192 mask = 0;
1194 for (i = 0; i < blocks_needed - 1; i++)
1196 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1197 result[j++] = x;
1198 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1201 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1202 if (small_prec)
1204 if (sgn == SIGNED)
1205 x = sext_hwi (x, small_prec);
1206 else
1207 x = zext_hwi (x, small_prec);
1209 result[j++] = x;
1210 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1212 /* Smear the sign bit. */
1213 while (j < out_len)
1214 result[j++] = mask;
1217 /* The inverse of wi_unpack. IN_LEN is the number of input
1218 blocks and PRECISION is the precision of the result. Return the
1219 number of blocks in the canonicalized result. */
1220 static unsigned int
1221 wi_pack (HOST_WIDE_INT *result,
1222 const unsigned HOST_HALF_WIDE_INT *input,
1223 unsigned int in_len, unsigned int precision)
1225 unsigned int i = 0;
1226 unsigned int j = 0;
1227 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1229 while (i + 1 < in_len)
1231 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1232 | ((unsigned HOST_WIDE_INT) input[i + 1]
1233 << HOST_BITS_PER_HALF_WIDE_INT));
1234 i += 2;
1237 /* Handle the case where in_len is odd. For this we zero extend. */
1238 if (in_len & 1)
1239 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1240 else if (j < blocks_needed)
1241 result[j++] = 0;
1242 return canonize (result, j, precision);
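/* Editorial sketch, not part of GCC's wide-int.cc: wi_unpack and
   wi_pack reduced to one 64-bit block and two 32-bit halves.  The
   multiplication and division routines below work on half-blocks so
   that the product of two "digits" always fits in a full
   HOST_WIDE_INT.  */
static void
sketch_unpack_block (unsigned long long x, unsigned int half[2])
{
  half[0] = (unsigned int) x;		/* Low half.  */
  half[1] = (unsigned int) (x >> 32);	/* High half.  */
}

static unsigned long long
sketch_pack_block (const unsigned int half[2])
{
  return (unsigned long long) half[0]
	 | ((unsigned long long) half[1] << 32);
}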
1245 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1246 result is returned.
1248 If HIGH is not set, throw away the upper half after the check is
1249 made to see if it overflows. Unfortunately there is no better way
1250 to check for overflow than to do this. If OVERFLOW is nonnull,
1251 record in *OVERFLOW whether the result overflowed. SGN controls
1252 the signedness and is used to check overflow or if HIGH is set. */
1253 unsigned int
1254 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1255 unsigned int op1len, const HOST_WIDE_INT *op2val,
1256 unsigned int op2len, unsigned int prec, signop sgn,
1257 bool *overflow, bool high)
1259 unsigned HOST_WIDE_INT o0, o1, k, t;
1260 unsigned int i;
1261 unsigned int j;
1262 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1263 unsigned int half_blocks_needed = blocks_needed * 2;
1264 /* The sizes here are scaled to support a 2x largest mode by 2x
1265 largest mode yielding a 4x largest mode result. This is what is
1266 needed by vpn. */
1268 unsigned HOST_HALF_WIDE_INT
1269 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1270 unsigned HOST_HALF_WIDE_INT
1271 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1272 /* The '2' in 'R' is because we are internally doing a full
1273 multiply. */
1274 unsigned HOST_HALF_WIDE_INT
1275 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1276 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1278 /* If the top level routine did not really pass in an overflow, then
1279 just make sure that we never attempt to set it. */
1280 bool needs_overflow = (overflow != 0);
1281 if (needs_overflow)
1282 *overflow = false;
1284 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1285 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1287 /* This is a surprisingly common case, so do it first. */
1288 if (op1 == 0 || op2 == 0)
1290 val[0] = 0;
1291 return 1;
1294 #ifdef umul_ppmm
1295 if (sgn == UNSIGNED)
1297 /* If the inputs are single HWIs and the output has room for at
1298 least two HWIs, we can use umul_ppmm directly. */
1299 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1300 && wi::fits_uhwi_p (op1)
1301 && wi::fits_uhwi_p (op2))
1303 /* This case never overflows. */
1304 if (high)
1306 val[0] = 0;
1307 return 1;
1309 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1310 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1312 val[2] = 0;
1313 return 3;
1315 return 1 + (val[1] != 0 || val[0] < 0);
1317 /* Likewise if the output is a full single HWI, except that the
1318 upper HWI of the result is only used for determining overflow.
1319 (We handle this case inline when overflow isn't needed.) */
1320 else if (prec == HOST_BITS_PER_WIDE_INT)
1322 unsigned HOST_WIDE_INT upper;
1323 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1324 if (needs_overflow)
1325 *overflow = (upper != 0);
1326 if (high)
1327 val[0] = upper;
1328 return 1;
1331 #endif
1333 /* Handle multiplications by 1. */
1334 if (op1 == 1)
1336 if (high)
1338 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1339 return 1;
1341 for (i = 0; i < op2len; i++)
1342 val[i] = op2val[i];
1343 return op2len;
1345 if (op2 == 1)
1347 if (high)
1349 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1350 return 1;
1352 for (i = 0; i < op1len; i++)
1353 val[i] = op1val[i];
1354 return op1len;
1357 /* If we need to check for overflow, we can only do half wide
1358 multiplies quickly because we need to look at the top bits to
1359 check for the overflow. */
1360 if ((high || needs_overflow)
1361 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1363 unsigned HOST_WIDE_INT r;
1365 if (sgn == SIGNED)
1367 o0 = op1.to_shwi ();
1368 o1 = op2.to_shwi ();
1370 else
1372 o0 = op1.to_uhwi ();
1373 o1 = op2.to_uhwi ();
1376 r = o0 * o1;
1377 if (needs_overflow)
1379 if (sgn == SIGNED)
1381 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1382 *overflow = true;
1384 else
1386 if ((r >> prec) != 0)
1387 *overflow = true;
1390 val[0] = high ? r >> prec : r;
1391 return 1;
1394 /* We do unsigned mul and then correct it. */
1395 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1396 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1398 /* The 2 is for a full mult. */
1399 memset (r, 0, half_blocks_needed * 2
1400 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1402 for (j = 0; j < half_blocks_needed; j++)
1404 k = 0;
1405 for (i = 0; i < half_blocks_needed; i++)
1407 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1408 + r[i + j] + k);
1409 r[i + j] = t & HALF_INT_MASK;
1410 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1412 r[j + half_blocks_needed] = k;
1415 /* We did unsigned math above. For signed we must adjust the
1416 product (assuming we need to see that). */
1417 if (sgn == SIGNED && (high || needs_overflow))
1419 unsigned HOST_WIDE_INT b;
1420 if (wi::neg_p (op1))
1422 b = 0;
1423 for (i = 0; i < half_blocks_needed; i++)
1425 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1426 - (unsigned HOST_WIDE_INT)v[i] - b;
1427 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1428 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1431 if (wi::neg_p (op2))
1433 b = 0;
1434 for (i = 0; i < half_blocks_needed; i++)
1436 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1437 - (unsigned HOST_WIDE_INT)u[i] - b;
1438 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1439 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1444 if (needs_overflow)
1446 HOST_WIDE_INT top;
1448 /* For unsigned, overflow is true if any of the top bits are set.
1449 For signed, overflow is true if any of the top bits are not equal
1450 to the sign bit. */
1451 if (sgn == UNSIGNED)
1452 top = 0;
1453 else
1455 top = r[(half_blocks_needed) - 1];
1456 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1457 top &= mask;
1460 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1461 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1462 *overflow = true;
1465 int r_offset = high ? half_blocks_needed : 0;
1466 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
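/* Editorial sketch, not part of GCC's wide-int.cc: the schoolbook
   half-word multiplication used above, reduced to one 64-bit operand
   on each side (two 32-bit "digits" each).  The full 128-bit product
   is produced as four 32-bit digits, least significant first; the
   high two digits are what the HIGH variant of mul_internal keeps.  */
static void
sketch_umul_64x64 (unsigned long long a, unsigned long long b,
		   unsigned int r[4])
{
  unsigned int u[2] = { (unsigned int) a, (unsigned int) (a >> 32) };
  unsigned int v[2] = { (unsigned int) b, (unsigned int) (b >> 32) };
  r[0] = r[1] = r[2] = r[3] = 0;
  for (int j = 0; j < 2; j++)
    {
      unsigned long long k = 0;
      for (int i = 0; i < 2; i++)
	{
	  /* Digit product plus previous partial sum plus carry never
	     overflows 64 bits: (2^32-1)^2 + 2*(2^32-1) = 2^64 - 1.  */
	  unsigned long long t = (unsigned long long) u[i] * v[j]
				 + r[i + j] + k;
	  r[i + j] = (unsigned int) t;
	  k = t >> 32;
	}
      r[j + 2] = (unsigned int) k;
    }
}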
1469 /* Compute the population count of X. */
1471 wi::popcount (const wide_int_ref &x)
1473 unsigned int i;
1474 int count;
1476 /* The high order block is special if it is the last block and the
1477 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1478 have to clear out any ones above the precision before doing
1479 popcount on this block. */
1480 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1481 unsigned int stop = x.len;
1482 if (count < 0)
1484 count = popcount_hwi (x.uhigh () << -count);
1485 stop -= 1;
1487 else
1489 if (x.sign_mask () >= 0)
1490 count = 0;
1493 for (i = 0; i < stop; ++i)
1494 count += popcount_hwi (x.val[i]);
1496 return count;
1499 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1500 whether the result overflows when OP0 and OP1 are treated as having
1501 signedness SGN. Return the number of blocks in VAL. */
1502 unsigned int
1503 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1504 unsigned int op0len, const HOST_WIDE_INT *op1,
1505 unsigned int op1len, unsigned int prec,
1506 signop sgn, bool *overflow)
1508 unsigned HOST_WIDE_INT o0 = 0;
1509 unsigned HOST_WIDE_INT o1 = 0;
1510 unsigned HOST_WIDE_INT x = 0;
1511 /* We implement subtraction as an in place negate and add. Negation
1512 is just inversion and add 1, so we can do the add of 1 by just
1513 starting the borrow in of the first element at 1. */
1514 unsigned HOST_WIDE_INT borrow = 0;
1515 unsigned HOST_WIDE_INT old_borrow = 0;
1517 unsigned HOST_WIDE_INT mask0, mask1;
1518 unsigned int i;
1520 unsigned int len = MAX (op0len, op1len);
1521 mask0 = -top_bit_of (op0, op0len, prec);
1522 mask1 = -top_bit_of (op1, op1len, prec);
1524 /* Subtract all of the explicitly defined elements. */
1525 for (i = 0; i < len; i++)
1527 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1528 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1529 x = o0 - o1 - borrow;
1530 val[i] = x;
1531 old_borrow = borrow;
1532 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1535 if (len * HOST_BITS_PER_WIDE_INT < prec)
1537 val[len] = mask0 - mask1 - borrow;
1538 len++;
1539 if (overflow)
1540 *overflow = false;
1542 else if (overflow)
1544 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1545 if (sgn == SIGNED)
1547 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1548 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1550 else
1552 /* Put the MSBs of X and O0 in the top of the HWI. */
1553 x <<= shift;
1554 o0 <<= shift;
1555 if (old_borrow)
1556 *overflow = (x >= o0);
1557 else
1558 *overflow = (x > o0);
1562 return canonize (val, len, prec);
1567 * Division and Mod
1570 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1571 algorithm is a small modification of the algorithm in Hacker's
1572 Delight by Warren, which itself is a small modification of Knuth's
1573 algorithm. M is the number of significant elements of B_DIVIDEND
1574 (however, at least one extra element of B_DIVIDEND must be
1575 allocated); N is the number of elements of B_DIVISOR. */
1576 static void
1577 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1578 unsigned HOST_HALF_WIDE_INT *b_remainder,
1579 unsigned HOST_HALF_WIDE_INT *b_dividend,
1580 unsigned HOST_HALF_WIDE_INT *b_divisor,
1581 int m, int n)
1583 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1584 HOST_WIDE_INT and stored in the lower bits of each word. This
1585 algorithm should work properly on both 32 and 64 bit
1586 machines. */
1587 unsigned HOST_WIDE_INT b
1588 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1589 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1590 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1591 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1592 HOST_WIDE_INT t, k;
1593 int i, j, s;
1595 /* Single digit divisor. */
1596 if (n == 1)
1598 k = 0;
1599 for (j = m - 1; j >= 0; j--)
1601 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1602 k = ((k * b + b_dividend[j])
1603 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1604 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1606 b_remainder[0] = k;
1607 return;
1610 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1612 if (s)
1614 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1615 algorithm, we can overwrite b_dividend and b_divisor, so we do
1616 that. */
1617 for (i = n - 1; i > 0; i--)
1618 b_divisor[i] = (b_divisor[i] << s)
1619 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1620 b_divisor[0] = b_divisor[0] << s;
1622 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1623 for (i = m - 1; i > 0; i--)
1624 b_dividend[i] = (b_dividend[i] << s)
1625 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1626 b_dividend[0] = b_dividend[0] << s;
1629 /* Main loop. */
1630 for (j = m - n; j >= 0; j--)
1632 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1633 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1634 again:
1635 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1637 qhat -= 1;
1638 rhat += b_divisor[n-1];
1639 if (rhat < b)
1640 goto again;
1643 /* Multiply and subtract. */
1644 k = 0;
1645 for (i = 0; i < n; i++)
1647 p = qhat * b_divisor[i];
1648 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1649 b_dividend[i + j] = t;
1650 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1651 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1653 t = b_dividend[j+n] - k;
1654 b_dividend[j+n] = t;
1656 b_quotient[j] = qhat;
1657 if (t < 0)
1659 b_quotient[j] -= 1;
1660 k = 0;
1661 for (i = 0; i < n; i++)
1663 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1664 b_dividend[i+j] = t;
1665 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1667 b_dividend[j+n] += k;
1670 if (s)
1671 for (i = 0; i < n; i++)
1672 b_remainder[i] = (b_dividend[i] >> s)
1673 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1674 else
1675 for (i = 0; i < n; i++)
1676 b_remainder[i] = b_dividend[i];
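/* Editorial sketch, not part of GCC's wide-int.cc: the single-digit
   divisor case above (n == 1), written with 32-bit digits and 64-bit
   intermediate arithmetic: classic short division from the most
   significant digit down, carrying the running remainder K.  Returns
   the remainder.  */
static unsigned int
sketch_short_div (unsigned int *quotient, const unsigned int *dividend,
		  int m, unsigned int divisor)
{
  unsigned long long k = 0;
  for (int j = m - 1; j >= 0; j--)
    {
      /* K < DIVISOR < 2^32, so CUR fits in 64 bits and the quotient
	 digit fits in 32 bits.  */
      unsigned long long cur = (k << 32) | dividend[j];
      quotient[j] = (unsigned int) (cur / divisor);
      k = cur % divisor;
    }
  return (unsigned int) k;
}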
1680 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1681 the result. If QUOTIENT is nonnull, store the value of the quotient
1682 there and return the number of blocks in it. The return value is
1683 not defined otherwise. If REMAINDER is nonnull, store the value
1684 of the remainder there and store the number of blocks in
1685 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1686 the division overflowed. */
1687 unsigned int
1688 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1689 HOST_WIDE_INT *remainder,
1690 const HOST_WIDE_INT *dividend_val,
1691 unsigned int dividend_len, unsigned int dividend_prec,
1692 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1693 unsigned int divisor_prec, signop sgn,
1694 bool *oflow)
1696 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1697 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1698 unsigned HOST_HALF_WIDE_INT
1699 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1700 unsigned HOST_HALF_WIDE_INT
1701 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1702 unsigned HOST_HALF_WIDE_INT
1703 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1704 unsigned HOST_HALF_WIDE_INT
1705 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1706 unsigned int m, n;
1707 bool dividend_neg = false;
1708 bool divisor_neg = false;
1709 bool overflow = false;
1710 wide_int neg_dividend, neg_divisor;
1712 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1713 dividend_prec);
1714 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1715 divisor_prec);
1716 if (divisor == 0)
1717 overflow = true;
1719 /* The smallest signed number / -1 causes overflow. The dividend_len
1720 check is for speed rather than correctness. */
1721 if (sgn == SIGNED
1722 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1723 && divisor == -1
1724 && wi::only_sign_bit_p (dividend))
1725 overflow = true;
1727 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1728 (signed min / -1) has the same representation as the original dividend.
1729 We have traditionally made division by zero act as division by one,
1730 so there too we use the original dividend. */
1731 if (overflow)
1733 if (remainder)
1735 *remainder_len = 1;
1736 remainder[0] = 0;
1738 if (oflow != 0)
1739 *oflow = true;
1740 if (quotient)
1741 for (unsigned int i = 0; i < dividend_len; ++i)
1742 quotient[i] = dividend_val[i];
1743 return dividend_len;
1746 if (oflow)
1747 *oflow = false;
1749 /* Do it on the host if you can. */
1750 if (sgn == SIGNED
1751 && wi::fits_shwi_p (dividend)
1752 && wi::fits_shwi_p (divisor))
1754 HOST_WIDE_INT o0 = dividend.to_shwi ();
1755 HOST_WIDE_INT o1 = divisor.to_shwi ();
1757 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1759 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1760 if (quotient)
1762 quotient[0] = HOST_WIDE_INT_MIN;
1763 quotient[1] = 0;
1765 if (remainder)
1767 remainder[0] = 0;
1768 *remainder_len = 1;
1770 return 2;
1772 else
1774 if (quotient)
1775 quotient[0] = o0 / o1;
1776 if (remainder)
1778 remainder[0] = o0 % o1;
1779 *remainder_len = 1;
1781 return 1;
1785 if (sgn == UNSIGNED
1786 && wi::fits_uhwi_p (dividend)
1787 && wi::fits_uhwi_p (divisor))
1789 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1790 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1791 unsigned int quotient_len = 1;
1793 if (quotient)
1795 quotient[0] = o0 / o1;
1796 if (o1 == 1
1797 && (HOST_WIDE_INT) o0 < 0
1798 && dividend_prec > HOST_BITS_PER_WIDE_INT)
1800 quotient[1] = 0;
1801 quotient_len = 2;
1804 if (remainder)
1806 remainder[0] = o0 % o1;
1807 if ((HOST_WIDE_INT) remainder[0] < 0
1808 && dividend_prec > HOST_BITS_PER_WIDE_INT)
1810 remainder[1] = 0;
1811 *remainder_len = 2;
1813 else
1814 *remainder_len = 1;
1816 return quotient_len;
1819 /* Make the divisor and dividend positive and remember what we
1820 did. */
1821 if (sgn == SIGNED)
1823 if (wi::neg_p (dividend))
1825 neg_dividend = -dividend;
1826 dividend = neg_dividend;
1827 dividend_neg = true;
1829 if (wi::neg_p (divisor))
1831 neg_divisor = -divisor;
1832 divisor = neg_divisor;
1833 divisor_neg = true;
1837 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1838 dividend_blocks_needed, dividend_prec, sgn);
1839 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1840 divisor_blocks_needed, divisor_prec, sgn);
1842 m = dividend_blocks_needed;
1843 b_dividend[m] = 0;
1844 while (m > 1 && b_dividend[m - 1] == 0)
1845 m--;
1847 n = divisor_blocks_needed;
1848 while (n > 1 && b_divisor[n - 1] == 0)
1849 n--;
1851 memset (b_quotient, 0, sizeof (b_quotient));
1853 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1855 unsigned int quotient_len = 0;
1856 if (quotient)
1858 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1859 /* The quotient is negative if exactly one of the divisor or dividend
1860 is negative. */
1861 if (dividend_neg != divisor_neg)
1862 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1863 quotient_len, dividend_prec,
1864 UNSIGNED, 0);
1867 if (remainder)
1869 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1870 /* The remainder is always the same sign as the dividend. */
1871 if (dividend_neg)
1872 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1873 *remainder_len, dividend_prec,
1874 UNSIGNED, 0);
1877 return quotient_len;
1881 * Shifting, rotating and extraction.
1884 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1885 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1886 unsigned int
1887 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1888 unsigned int xlen, unsigned int precision,
1889 unsigned int shift)
1891 /* Split the shift into a whole-block shift and a subblock shift. */
1892 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1893 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1895 /* The whole-block shift fills with zeros. */
1896 unsigned int len = BLOCKS_NEEDED (precision);
1897 for (unsigned int i = 0; i < skip; ++i)
1898 val[i] = 0;
1900 /* It's easier to handle the simple block case specially. */
1901 if (small_shift == 0)
1902 for (unsigned int i = skip; i < len; ++i)
1903 val[i] = safe_uhwi (xval, xlen, i - skip);
1904 else
1906 /* The first unfilled output block is a left shift of the first
1907 block in XVAL. The other output blocks contain bits from two
1908 consecutive input blocks. */
1909 unsigned HOST_WIDE_INT carry = 0;
1910 for (unsigned int i = skip; i < len; ++i)
1912 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1913 val[i] = (x << small_shift) | carry;
1914 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1917 return canonize (val, len, precision);
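/* Editorial sketch, not part of GCC's wide-int.cc: the sub-block part
   of the left shift above on exactly two 64-bit blocks, for
   0 < SHIFT < 64.  Each output block combines bits from two adjacent
   input blocks; the bits shifted out of the low block become the low
   bits of the high block.  */
static void
sketch_lshift_2 (unsigned long long val[2], unsigned int shift)
{
  unsigned long long carry = val[0] >> (64 - shift);
  val[1] = (val[1] << shift) | carry;
  val[0] <<= shift;
}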
1920 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1921 number of blocks in VAL. The input has XPRECISION bits and the
1922 output has XPRECISION - SHIFT bits. */
1923 static unsigned int
1924 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1925 unsigned int xlen, unsigned int xprecision,
1926 unsigned int shift)
1928 /* Split the shift into a whole-block shift and a subblock shift. */
1929 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1930 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1932 /* Work out how many blocks are needed to store the significant bits
1933 (excluding the upper zeros or signs). */
1934 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1936 /* It's easier to handle the simple block case specially. */
1937 if (small_shift == 0)
1938 for (unsigned int i = 0; i < len; ++i)
1939 val[i] = safe_uhwi (xval, xlen, i + skip);
1940 else
1942 /* Each output block but the last is a combination of two input blocks.
1943 The last block is a right shift of the last block in XVAL. */
1944 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1945 for (unsigned int i = 0; i < len; ++i)
1947 val[i] = curr >> small_shift;
1948 curr = safe_uhwi (xval, xlen, i + skip + 1);
1949 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1952 return len;
1955 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1956 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1957 VAL has PRECISION bits. */
1958 unsigned int
1959 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1960 unsigned int xlen, unsigned int xprecision,
1961 unsigned int precision, unsigned int shift)
1963 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1965 /* The value we just created has precision XPRECISION - SHIFT.
1966 Zero-extend it to wider precisions. */
1967 if (precision > xprecision - shift)
1969 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1970 if (small_prec)
1971 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1972 else if (val[len - 1] < 0)
1974 /* Add a new block with a zero. */
1975 val[len++] = 0;
1976 return len;
1979 return canonize (val, len, precision);
1982 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1983 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1984 VAL has PRECISION bits. */
1985 unsigned int
1986 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1987 unsigned int xlen, unsigned int xprecision,
1988 unsigned int precision, unsigned int shift)
1990 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1992 /* The value we just created has precision XPRECISION - SHIFT.
1993 Sign-extend it to wider types. */
1994 if (precision > xprecision - shift)
1996 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1997 if (small_prec)
1998 val[len - 1] = sext_hwi (val[len - 1], small_prec);
2000 return canonize (val, len, precision);
2003 /* Return the number of leading (upper) zeros in X. */
2005 wi::clz (const wide_int_ref &x)
2007 /* Calculate how many bits there are above the highest represented block. */
2008 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2010 unsigned HOST_WIDE_INT high = x.uhigh ();
2011 if (count < 0)
2012 /* The upper -COUNT bits of HIGH are not part of the value.
2013 Clear them out. */
2014 high = (high << -count) >> -count;
2015 else if (x.sign_mask () < 0)
2016 /* The upper bit is set, so there are no leading zeros. */
2017 return 0;
2019 /* We don't need to look below HIGH. Either HIGH is nonzero,
2020 or the top bit of the block below is nonzero; clz_hwi is
2021 HOST_BITS_PER_WIDE_INT in the latter case. */
2022 return count + clz_hwi (high);
2025 /* Return the number of redundant sign bits in X. (That is, the number
2026 of bits immediately below the sign bit that have the same value as
2027 the sign bit.) */
2029 wi::clrsb (const wide_int_ref &x)
2031 /* Calculate how many bits there are above the highest represented block. */
2032 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2034 unsigned HOST_WIDE_INT high = x.uhigh ();
2035 unsigned HOST_WIDE_INT mask = -1;
2036 if (count < 0)
2038 /* The upper -COUNT bits of HIGH are not part of the value.
2039 Clear them from both MASK and HIGH. */
2040 mask >>= -count;
2041 high &= mask;
2044 /* If the top bit is 1, count the number of leading 1s. If the top
2045 bit is zero, count the number of leading zeros. */
2046 if (high > mask / 2)
2047 high ^= mask;
2049 /* There are no sign bits below the top block, so we don't need to look
2050 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2051 HIGH is 0. */
2052 return count + clz_hwi (high) - 1;
2055 /* Return the number of trailing (lower) zeros in X. */
2057 wi::ctz (const wide_int_ref &x)
2059 if (x.len == 1 && x.ulow () == 0)
2060 return x.precision;
2062 /* Having dealt with the zero case, there must be a block with a
2063 nonzero bit. We don't care about the bits above the first 1. */
2064 unsigned int i = 0;
2065 while (x.val[i] == 0)
2066 ++i;
2067 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2070 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2071 return -1. */
2073 wi::exact_log2 (const wide_int_ref &x)
2075 /* Reject cases where there are implicit -1 blocks above HIGH. */
2076 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2077 return -1;
2079 /* Set CRUX to the index of the entry that should be nonzero.
2080 If the top block is zero then the next lowest block (if any)
2081 must have the high bit set. */
2082 unsigned int crux = x.len - 1;
2083 if (crux > 0 && x.val[crux] == 0)
2084 crux -= 1;
2086 /* Check that all lower blocks are zero. */
2087 for (unsigned int i = 0; i < crux; ++i)
2088 if (x.val[i] != 0)
2089 return -1;
2091 /* Get a zero-extended form of block CRUX. */
2092 unsigned HOST_WIDE_INT hwi = x.val[crux];
2093 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2094 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2096 /* Now it's down to whether HWI is a power of 2. */
2097 int res = ::exact_log2 (hwi);
2098 if (res >= 0)
2099 res += crux * HOST_BITS_PER_WIDE_INT;
2100 return res;
2103 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2105 wi::floor_log2 (const wide_int_ref &x)
2107 return x.precision - 1 - clz (x);
2110 /* Return the index of the first (lowest) set bit in X, counting from 1.
2111 Return 0 if X is 0. */
2113 wi::ffs (const wide_int_ref &x)
2115 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2118 /* Return true if sign-extending X to have precision PRECISION would give
2119 the minimum signed value at that precision. */
2120 bool
2121 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2123 return ctz (x) + 1 == int (precision);
2126 /* Return true if X represents the minimum signed value. */
2127 bool
2128 wi::only_sign_bit_p (const wide_int_ref &x)
2130 return only_sign_bit_p (x, x.precision);
2134 * Private utilities.
2137 void gt_ggc_mx (widest_int *) { }
2138 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2139 void gt_pch_nx (widest_int *) { }
2141 template void wide_int::dump () const;
2142 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2143 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2144 template void offset_int::dump () const;
2145 template void widest_int::dump () const;