[official-gcc.git] / gcc / wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
28 #define HOST_BITS_PER_HALF_WIDE_INT 32
29 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
30 # define HOST_HALF_WIDE_INT long
31 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
32 # define HOST_HALF_WIDE_INT int
33 #else
34 #error Please add support for HOST_HALF_WIDE_INT
35 #endif
37 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
38 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
39 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
40 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
41 typedef unsigned HOST_WIDE_INT UWtype;
42 typedef unsigned int UQItype __attribute__ ((mode (QI)));
43 typedef unsigned int USItype __attribute__ ((mode (SI)));
44 typedef unsigned int UDItype __attribute__ ((mode (DI)));
45 #if W_TYPE_SIZE == 32
46 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
47 #else
48 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
49 #endif
50 #include "longlong.h"
51 #endif
53 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
56 * Internal utilities.
59 /* Quantities to deal with values that hold half of a wide int. Used
60 in multiply and divide. */
61 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
64 #define BLOCKS_NEEDED(PREC) \
65 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
66 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
68 /* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
69 based on the top existing bit of VAL. */
71 static unsigned HOST_WIDE_INT
72 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
77 /* Convert the integer in VAL to canonical form, returning its new length.
78 LEN is the number of blocks currently in VAL and PRECISION is the number
79 of bits in the integer it represents.
81 This function only changes the representation, not the value. */
82 static unsigned int
83 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 HOST_WIDE_INT top;
87 int i;
89 if (len > blocks_needed)
90 len = blocks_needed;
92 if (len == 1)
93 return len;
95 top = val[len - 1];
96 if (len * HOST_BITS_PER_WIDE_INT > precision)
97 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 if (top != 0 && top != (HOST_WIDE_INT)-1)
99 return len;
101 /* At this point we know that the top is either 0 or -1. Find the
102 first block that is not a copy of this. */
103 for (i = len - 2; i >= 0; i--)
105 HOST_WIDE_INT x = val[i];
106 if (x != top)
108 if (SIGN_MASK (x) == top)
109 return i + 1;
111 /* We need an extra block because the top bit of block I does
112 not match the extension. */
113 return i + 2;
117 /* The number is 0 or -1. */
118 return 1;
121 /* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 another 0 block if needed, and return number of blocks needed. */
124 static inline unsigned int
125 canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129 val[1] = 0;
130 return 2;
132 return 1;
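/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): how canonize trims a redundant upper
   block.  At 128-bit precision, an explicit zero block above a low
   block whose top bit is clear carries no information, whereas a zero
   block above an all-ones low block is significant.  */

static inline void
example_canonize_redundant_block (void)
{
  HOST_WIDE_INT val[2];

  val[0] = 42;			/* Top bit clear...  */
  val[1] = 0;			/* ...so this block is redundant.  */
  gcc_checking_assert (canonize (val, 2, 128) == 1);

  val[0] = -1;			/* Top bit set, implicit extension is -1...  */
  val[1] = 0;			/* ...so this zero block is significant.  */
  gcc_checking_assert (canonize (val, 2, 128) == 2);
}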
136 * Conversion routines in and out of wide_int.
139 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 result for an integer with precision PRECISION. Return the length
141 of VAL (after any canonization). */
142 unsigned int
143 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 unsigned int xlen, unsigned int precision, bool need_canon)
146 for (unsigned i = 0; i < xlen; i++)
147 val[i] = xval[i];
148 return need_canon ? canonize (val, xlen, precision) : xlen;
151 /* Construct a wide int from a buffer of length LEN. BUFFER will be
152 read according to the byte and word endianness of the target.
153 Only the lower BUFFER_LEN bytes of the result are set; the remaining
154 high bytes are cleared. */
155 wide_int
156 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
158 unsigned int precision = buffer_len * BITS_PER_UNIT;
159 wide_int result = wide_int::create (precision);
160 unsigned int words = buffer_len / UNITS_PER_WORD;
162 /* We have to clear all the bits ourselves, as we merely OR in values
163 below. */
164 unsigned int len = BLOCKS_NEEDED (precision);
165 HOST_WIDE_INT *val = result.write_val ();
166 for (unsigned int i = 0; i < len; ++i)
167 val[i] = 0;
169 for (unsigned int byte = 0; byte < buffer_len; byte++)
171 unsigned int offset;
172 unsigned int index;
173 unsigned int bitpos = byte * BITS_PER_UNIT;
174 unsigned HOST_WIDE_INT value;
176 if (buffer_len > UNITS_PER_WORD)
178 unsigned int word = byte / UNITS_PER_WORD;
180 if (WORDS_BIG_ENDIAN)
181 word = (words - 1) - word;
183 offset = word * UNITS_PER_WORD;
185 if (BYTES_BIG_ENDIAN)
186 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
187 else
188 offset += byte % UNITS_PER_WORD;
190 else
191 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
193 value = (unsigned HOST_WIDE_INT) buffer[offset];
195 index = bitpos / HOST_BITS_PER_WIDE_INT;
196 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
199 result.set_len (canonize (val, len, precision));
201 return result;
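/* Illustrative sketch (assumes a little-endian target; the function name
   is made up for the example): reading the two bytes {0x34, 0x12}
   produces a 16-bit wide_int equal to 0x1234, because buffer[0] is
   placed in the least significant byte position.  */

static inline void
example_from_buffer (void)
{
  unsigned char bytes[2] = { 0x34, 0x12 };
  wide_int x = wi::from_buffer (bytes, 2);
  gcc_checking_assert (x.get_precision () == 16);
  gcc_checking_assert (wi::eq_p (x, 0x1234));
}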
204 /* Set RESULT from X; the sign is taken according to SGN. */
205 void
206 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
208 int len = x.get_len ();
209 const HOST_WIDE_INT *v = x.get_val ();
210 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
212 if (wi::neg_p (x, sgn))
214 /* We use one's complement to avoid the 0x80..0 edge case that
215 unary negation cannot handle. */
216 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
217 for (int i = 0; i < len; i++)
218 t[i] = ~v[i];
219 if (excess > 0)
220 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
222 mpz_com (result, result);
224 else if (excess > 0)
226 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
227 for (int i = 0; i < len - 1; i++)
228 t[i] = v[i];
229 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
230 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232 else
233 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
236 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
237 values of VAL will be wrapped; otherwise, they will be set to the
238 appropriate minimum or maximum TYPE bound. */
239 wide_int
240 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
242 size_t count, numb;
243 unsigned int prec = TYPE_PRECISION (type);
244 wide_int res = wide_int::create (prec);
246 if (!wrap)
248 mpz_t min, max;
250 mpz_init (min);
251 mpz_init (max);
252 get_type_static_bounds (type, min, max);
254 if (mpz_cmp (x, min) < 0)
255 mpz_set (x, min);
256 else if (mpz_cmp (x, max) > 0)
257 mpz_set (x, max);
259 mpz_clear (min);
260 mpz_clear (max);
263 /* Determine the number of unsigned HOST_WIDE_INTs that are required
264 for representing the absolute value. The code to calculate count is
265 extracted from the GMP manual, section "Integer Import and Export":
266 http://gmplib.org/manual/Integer-Import-and-Export.html */
267 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
268 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
269 HOST_WIDE_INT *val = res.write_val ();
270 /* Read the absolute value.
272 Write directly to the wide_int storage if possible, otherwise leave
273 GMP to allocate the memory for us. It might be slightly more efficient
274 to use mpz_tdiv_r_2exp for the latter case, but the situation is
275 pathological and it seems safer to operate on the original mpz value
276 in all cases. */
277 void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
278 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
279 if (count < 1)
281 val[0] = 0;
282 count = 1;
284 count = MIN (count, BLOCKS_NEEDED (prec));
285 if (valres != val)
287 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
288 free (valres);
290 /* Zero-extend the absolute value to PREC bits. */
291 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
292 val[count++] = 0;
293 else
294 count = canonize (val, count, prec);
295 res.set_len (count);
297 if (mpz_sgn (x) < 0)
298 res = -res;
300 return res;
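/* Illustrative round trip (a sketch, not part of the original file;
   TYPE is assumed to be a signed integer type such as integer_type_node
   and the function name is made up for the example): a negative value
   converted to an mpz_t and back compares equal to the original.  */

static inline void
example_mpz_round_trip (tree type)
{
  mpz_t m;
  mpz_init (m);

  wide_int x = wi::shwi (-42, TYPE_PRECISION (type));
  wi::to_mpz (x, m, SIGNED);

  /* Allow wraparound on the way back; -42 is in range anyway.  */
  wide_int y = wi::from_mpz (type, m, true);
  gcc_checking_assert (wi::eq_p (x, y));

  mpz_clear (m);
}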
304 * Largest and smallest values in a mode.
307 /* Return the largest SGNed number that is representable in PRECISION bits.
309 TODO: There is still code from the double_int era that tries to
310 make up for the fact that double_ints could not represent the
311 min and max values of all types. This code should be removed
312 because the min and max values can always be represented in
313 wide_ints and int-csts. */
314 wide_int
315 wi::max_value (unsigned int precision, signop sgn)
317 gcc_checking_assert (precision != 0);
318 if (sgn == UNSIGNED)
319 /* The unsigned max is just all ones. */
320 return shwi (-1, precision);
321 else
322 /* The signed max is all ones except the top bit. This must be
323 explicitly represented. */
324 return mask (precision - 1, false, precision);
327 /* Return the smallest SGNed number that is representable in PRECISION bits. */
328 wide_int
329 wi::min_value (unsigned int precision, signop sgn)
331 gcc_checking_assert (precision != 0);
332 if (sgn == UNSIGNED)
333 return uhwi (0, precision);
334 else
335 /* The signed min is all zeros except the top bit. This must be
336 explicitly represented. */
337 return wi::set_bit_in_zero (precision - 1, precision);
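/* Worked example (a sketch, not part of the original file; the function
   name is made up for the example): the bounds computed above for an
   8-bit precision.  */

static inline void
example_8_bit_bounds (void)
{
  gcc_checking_assert (wi::max_value (8, SIGNED).to_shwi () == 127);
  gcc_checking_assert (wi::min_value (8, SIGNED).to_shwi () == -128);
  gcc_checking_assert (wi::max_value (8, UNSIGNED).to_uhwi () == 255);
  gcc_checking_assert (wi::min_value (8, UNSIGNED).to_uhwi () == 0);
}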
341 * Public utilities.
344 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
345 signedness SGN, to an integer that has PRECISION bits. Store the blocks
346 in VAL and return the number of blocks used.
348 This function can handle both extension (PRECISION > XPRECISION)
349 and truncation (PRECISION < XPRECISION). */
350 unsigned int
351 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
352 unsigned int xlen, unsigned int xprecision,
353 unsigned int precision, signop sgn)
355 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
356 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
357 for (unsigned i = 0; i < len; i++)
358 val[i] = xval[i];
360 if (precision > xprecision)
362 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
364 /* Expanding. */
365 if (sgn == UNSIGNED)
367 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
368 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
369 else if (val[len - 1] < 0)
371 while (len < BLOCKS_NEEDED (xprecision))
372 val[len++] = -1;
373 if (small_xprecision)
374 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
375 else
376 val[len++] = 0;
379 else
381 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
382 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
385 len = canonize (val, len, precision);
387 return len;
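/* Illustrative sketch (not part of the original file; wide_int::from is
   the public wrapper around this routine): extending the 8-bit pattern
   0xff to 32 bits yields -1 under SIGNED extension and 255 under
   UNSIGNED extension.  */

static inline void
example_force_to_size (void)
{
  wide_int x = wi::shwi (-1, 8);	/* 0xff at 8-bit precision.  */
  gcc_checking_assert (wide_int::from (x, 32, SIGNED).to_shwi () == -1);
  gcc_checking_assert (wide_int::from (x, 32, UNSIGNED).to_uhwi () == 255);
}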
390 /* This function hides the fact that we cannot rely on the bits beyond
391 the precision. This issue comes up in the relational comparisons
392 where we do allow comparisons of values of different precisions. */
393 static inline HOST_WIDE_INT
394 selt (const HOST_WIDE_INT *a, unsigned int len,
395 unsigned int blocks_needed, unsigned int small_prec,
396 unsigned int index, signop sgn)
398 HOST_WIDE_INT val;
399 if (index < len)
400 val = a[index];
401 else if (index < blocks_needed || sgn == SIGNED)
402 /* Signed or within the precision. */
403 val = SIGN_MASK (a[len - 1]);
404 else
405 /* Unsigned extension beyond the precision. */
406 val = 0;
408 if (small_prec && index == blocks_needed - 1)
409 return (sgn == SIGNED
410 ? sext_hwi (val, small_prec)
411 : zext_hwi (val, small_prec));
412 else
413 return val;
416 /* Find the highest bit represented in a wide int. This will in
417 general have the same value as the sign bit. */
418 static inline HOST_WIDE_INT
419 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
421 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
422 unsigned HOST_WIDE_INT val = a[len - 1];
423 if (excess > 0)
424 val <<= excess;
425 return val >> (HOST_BITS_PER_WIDE_INT - 1);
429 * Comparisons, note that only equality is an operator. The other
430 * comparisons cannot be operators since they are inherently signed or
431 * unsigned and C++ has no such operators.
434 /* Return true if OP0 == OP1. */
435 bool
436 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
437 const HOST_WIDE_INT *op1, unsigned int op1len,
438 unsigned int prec)
440 int l0 = op0len - 1;
441 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
443 if (op0len != op1len)
444 return false;
446 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
448 /* It does not matter if we zext or sext here, we just have to
449 do both the same way. */
450 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
451 return false;
452 l0--;
455 while (l0 >= 0)
456 if (op0[l0] != op1[l0])
457 return false;
458 else
459 l0--;
461 return true;
464 /* Return true if OP0 < OP1 using signed comparisons. */
465 bool
466 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
467 unsigned int precision,
468 const HOST_WIDE_INT *op1, unsigned int op1len)
470 HOST_WIDE_INT s0, s1;
471 unsigned HOST_WIDE_INT u0, u1;
472 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
473 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
474 int l = MAX (op0len - 1, op1len - 1);
476 /* Only the top block is compared as signed. The rest are unsigned
477 comparisons. */
478 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
479 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
480 if (s0 < s1)
481 return true;
482 if (s0 > s1)
483 return false;
485 l--;
486 while (l >= 0)
488 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
489 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
491 if (u0 < u1)
492 return true;
493 if (u0 > u1)
494 return false;
495 l--;
498 return false;
501 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
502 signed compares. */
504 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
505 unsigned int precision,
506 const HOST_WIDE_INT *op1, unsigned int op1len)
508 HOST_WIDE_INT s0, s1;
509 unsigned HOST_WIDE_INT u0, u1;
510 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
511 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
512 int l = MAX (op0len - 1, op1len - 1);
514 /* Only the top block is compared as signed. The rest are unsigned
515 comparisons. */
516 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
517 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
518 if (s0 < s1)
519 return -1;
520 if (s0 > s1)
521 return 1;
523 l--;
524 while (l >= 0)
526 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
527 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
529 if (u0 < u1)
530 return -1;
531 if (u0 > u1)
532 return 1;
533 l--;
536 return 0;
539 /* Return true if OP0 < OP1 using unsigned comparisons. */
540 bool
541 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
542 unsigned int precision,
543 const HOST_WIDE_INT *op1, unsigned int op1len)
545 unsigned HOST_WIDE_INT x0;
546 unsigned HOST_WIDE_INT x1;
547 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
548 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
549 int l = MAX (op0len - 1, op1len - 1);
551 while (l >= 0)
553 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
554 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
555 if (x0 < x1)
556 return true;
557 if (x0 > x1)
558 return false;
559 l--;
562 return false;
565 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
566 unsigned compares. */
568 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
569 unsigned int precision,
570 const HOST_WIDE_INT *op1, unsigned int op1len)
572 unsigned HOST_WIDE_INT x0;
573 unsigned HOST_WIDE_INT x1;
574 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
575 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
576 int l = MAX (op0len - 1, op1len - 1);
578 while (l >= 0)
580 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
581 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
582 if (x0 < x1)
583 return -1;
584 if (x0 > x1)
585 return 1;
586 l--;
589 return 0;
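/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): the same 8-bit pattern 0xff orders
   differently under the two signs; as a signed value it is -1 < 1, as
   an unsigned value it is 255 > 1.  */

static inline void
example_signed_vs_unsigned_compare (void)
{
  wide_int x = wi::shwi (-1, 8);
  wide_int one = wi::shwi (1, 8);

  gcc_checking_assert (wi::lts_p (x, one));
  gcc_checking_assert (!wi::ltu_p (x, one));
  gcc_checking_assert (wi::cmps (x, one) == -1);
  gcc_checking_assert (wi::cmpu (x, one) == 1);
}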
593 * Extension.
596 /* Sign-extend the number represented by XVAL and XLEN into VAL,
597 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
598 and VAL have PRECISION bits. */
599 unsigned int
600 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
601 unsigned int xlen, unsigned int precision, unsigned int offset)
603 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
604 /* Extending beyond the precision is a no-op. If we have only stored
605 OFFSET bits or fewer, the rest are already signs. */
606 if (offset >= precision || len >= xlen)
608 for (unsigned i = 0; i < xlen; ++i)
609 val[i] = xval[i];
610 return xlen;
612 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
613 for (unsigned int i = 0; i < len; i++)
614 val[i] = xval[i];
615 if (suboffset > 0)
617 val[len] = sext_hwi (xval[len], suboffset);
618 len += 1;
620 return canonize (val, len, precision);
623 /* Zero-extend the number represented by XVAL and XLEN into VAL,
624 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
625 and VAL have PRECISION bits. */
626 unsigned int
627 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
628 unsigned int xlen, unsigned int precision, unsigned int offset)
630 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
631 /* Extending beyond the precision is a no-op. If we have only stored
632 OFFSET bits or fewer, and the upper stored bit is zero, then there
633 is nothing to do. */
634 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
636 for (unsigned i = 0; i < xlen; ++i)
637 val[i] = xval[i];
638 return xlen;
640 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
641 for (unsigned int i = 0; i < len; i++)
642 val[i] = i < xlen ? xval[i] : -1;
643 if (suboffset > 0)
644 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
645 else
646 val[len] = 0;
647 return canonize (val, len + 1, precision);
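/* Illustrative sketch (not part of the original file; wi::sext and
   wi::zext are the public wrappers around the two routines above):
   extending from the low 4 bits of an all-ones 8-bit value gives 0xff
   under sign extension and 0x0f under zero extension.  */

static inline void
example_sext_zext (void)
{
  wide_int x = wi::shwi (-1, 8);
  gcc_checking_assert (wi::sext (x, 4).to_shwi () == -1);
  gcc_checking_assert (wi::zext (x, 4).to_uhwi () == 0x0f);
}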
651 * Masking, inserting, shifting, rotating.
654 /* Insert WIDTH bits from Y into X starting at START. */
655 wide_int
656 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
657 unsigned int width)
659 wide_int result;
660 wide_int mask;
661 wide_int tmp;
663 unsigned int precision = x.get_precision ();
664 if (start >= precision)
665 return x;
667 gcc_checking_assert (precision >= width);
669 if (start + width >= precision)
670 width = precision - start;
672 mask = wi::shifted_mask (start, width, false, precision);
673 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
674 result = tmp & mask;
676 tmp = wi::bit_and_not (x, mask);
677 result = result | tmp;
679 return result;
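/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): inserting the 4-bit value 0xa into zero
   at bit 8 of a 32-bit integer produces 0xa00.  */

static inline void
example_insert (void)
{
  wide_int x = wi::shwi (0, 32);
  wide_int y = wi::shwi (0xa, 32);
  gcc_checking_assert (wi::insert (x, y, 8, 4).to_uhwi () == 0xa00);
}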
682 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
683 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
684 bits. */
685 unsigned int
686 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
687 unsigned int xlen, unsigned int precision, unsigned int bit)
689 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
690 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
692 if (block + 1 >= xlen)
694 /* The operation either affects the last current block or needs
695 a new block. */
696 unsigned int len = block + 1;
697 for (unsigned int i = 0; i < len; i++)
698 val[i] = safe_uhwi (xval, xlen, i);
699 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
701 /* If the bit we just set is at the msb of the block, make sure
702 that any higher bits are zeros. */
703 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
704 val[len++] = 0;
705 return len;
707 else
709 for (unsigned int i = 0; i < xlen; i++)
710 val[i] = xval[i];
711 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
712 return canonize (val, xlen, precision);
716 /* bswap THIS. */
717 wide_int
718 wide_int_storage::bswap () const
720 wide_int result = wide_int::create (precision);
721 unsigned int i, s;
722 unsigned int len = BLOCKS_NEEDED (precision);
723 unsigned int xlen = get_len ();
724 const HOST_WIDE_INT *xval = get_val ();
725 HOST_WIDE_INT *val = result.write_val ();
727 /* This is not a well defined operation if the precision is not a
728 multiple of 8. */
729 gcc_assert ((precision & 0x7) == 0);
731 for (i = 0; i < len; i++)
732 val[i] = 0;
734 /* Only swap the bytes that are not the padding. */
735 for (s = 0; s < precision; s += 8)
737 unsigned int d = precision - s - 8;
738 unsigned HOST_WIDE_INT byte;
740 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
741 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
743 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
745 block = d / HOST_BITS_PER_WIDE_INT;
746 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
748 val[block] |= byte << offset;
751 result.set_len (canonize (val, len, precision));
752 return result;
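/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): byte-swapping a 16-bit value reverses
   its two bytes.  */

static inline void
example_bswap (void)
{
  wide_int x = wi::shwi (0x1234, 16);
  gcc_checking_assert (x.bswap ().to_uhwi () == 0x3412);
}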
755 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
756 above that up to PREC are zeros. The result is inverted if NEGATE
757 is true. Return the number of blocks in VAL. */
758 unsigned int
759 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
760 unsigned int prec)
762 if (width >= prec)
764 val[0] = negate ? 0 : -1;
765 return 1;
767 else if (width == 0)
769 val[0] = negate ? -1 : 0;
770 return 1;
773 unsigned int i = 0;
774 while (i < width / HOST_BITS_PER_WIDE_INT)
775 val[i++] = negate ? 0 : -1;
777 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
778 if (shift != 0)
780 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
781 val[i++] = negate ? ~last : last;
783 else
784 val[i++] = negate ? -1 : 0;
786 return i;
789 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
790 bits are ones, and the bits above that up to PREC are zeros. The result
791 is inverted if NEGATE is true. Return the number of blocks in VAL. */
792 unsigned int
793 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
794 bool negate, unsigned int prec)
796 if (start >= prec || width == 0)
798 val[0] = negate ? -1 : 0;
799 return 1;
802 if (width > prec - start)
803 width = prec - start;
804 unsigned int end = start + width;
806 unsigned int i = 0;
807 while (i < start / HOST_BITS_PER_WIDE_INT)
808 val[i++] = negate ? -1 : 0;
810 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
811 if (shift)
813 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
814 shift += width;
815 if (shift < HOST_BITS_PER_WIDE_INT)
817 /* case 000111000 */
818 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
819 val[i++] = negate ? ~block : block;
820 return i;
822 else
823 /* ...111000 */
824 val[i++] = negate ? block : ~block;
827 while (i < end / HOST_BITS_PER_WIDE_INT)
828 /* 1111111 */
829 val[i++] = negate ? 0 : -1;
831 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
832 if (shift != 0)
834 /* 000011111 */
835 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
836 val[i++] = negate ? ~block : block;
838 else if (end < prec)
839 val[i++] = negate ? -1 : 0;
841 return i;
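/* Illustrative sketch (not part of the original file): the masks built
   by the two routines above, shown at 32-bit precision through the
   public wi::mask and wi::shifted_mask wrappers.  */

static inline void
example_masks (void)
{
  /* Low 12 bits set.  */
  gcc_checking_assert (wi::mask (12, false, 32).to_uhwi () == 0xfff);
  /* Bits 4..11 set.  */
  gcc_checking_assert (wi::shifted_mask (4, 8, false, 32).to_uhwi () == 0xff0);
  /* The inverted form of the same mask.  */
  gcc_checking_assert (wi::shifted_mask (4, 8, true, 32).to_uhwi ()
		       == 0xfffff00f);
}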
845 * logical operations.
848 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
849 unsigned int
850 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
851 unsigned int op0len, const HOST_WIDE_INT *op1,
852 unsigned int op1len, unsigned int prec)
854 int l0 = op0len - 1;
855 int l1 = op1len - 1;
856 bool need_canon = true;
858 unsigned int len = MAX (op0len, op1len);
859 if (l0 > l1)
861 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
862 if (op1mask == 0)
864 l0 = l1;
865 len = l1 + 1;
867 else
869 need_canon = false;
870 while (l0 > l1)
872 val[l0] = op0[l0];
873 l0--;
877 else if (l1 > l0)
879 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
880 if (op0mask == 0)
881 len = l0 + 1;
882 else
884 need_canon = false;
885 while (l1 > l0)
887 val[l1] = op1[l1];
888 l1--;
893 while (l0 >= 0)
895 val[l0] = op0[l0] & op1[l0];
896 l0--;
899 if (need_canon)
900 len = canonize (val, len, prec);
902 return len;
905 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
906 unsigned int
907 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
908 unsigned int op0len, const HOST_WIDE_INT *op1,
909 unsigned int op1len, unsigned int prec)
911 wide_int result;
912 int l0 = op0len - 1;
913 int l1 = op1len - 1;
914 bool need_canon = true;
916 unsigned int len = MAX (op0len, op1len);
917 if (l0 > l1)
919 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
920 if (op1mask != 0)
922 l0 = l1;
923 len = l1 + 1;
925 else
927 need_canon = false;
928 while (l0 > l1)
930 val[l0] = op0[l0];
931 l0--;
935 else if (l1 > l0)
937 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
938 if (op0mask == 0)
939 len = l0 + 1;
940 else
942 need_canon = false;
943 while (l1 > l0)
945 val[l1] = ~op1[l1];
946 l1--;
951 while (l0 >= 0)
953 val[l0] = op0[l0] & ~op1[l0];
954 l0--;
957 if (need_canon)
958 len = canonize (val, len, prec);
960 return len;
963 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
964 unsigned int
965 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
966 unsigned int op0len, const HOST_WIDE_INT *op1,
967 unsigned int op1len, unsigned int prec)
969 wide_int result;
970 int l0 = op0len - 1;
971 int l1 = op1len - 1;
972 bool need_canon = true;
974 unsigned int len = MAX (op0len, op1len);
975 if (l0 > l1)
977 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
978 if (op1mask != 0)
980 l0 = l1;
981 len = l1 + 1;
983 else
985 need_canon = false;
986 while (l0 > l1)
988 val[l0] = op0[l0];
989 l0--;
993 else if (l1 > l0)
995 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
996 if (op0mask != 0)
997 len = l0 + 1;
998 else
1000 need_canon = false;
1001 while (l1 > l0)
1003 val[l1] = op1[l1];
1004 l1--;
1009 while (l0 >= 0)
1011 val[l0] = op0[l0] | op1[l0];
1012 l0--;
1015 if (need_canon)
1016 len = canonize (val, len, prec);
1018 return len;
1021 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1022 unsigned int
1023 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1024 unsigned int op0len, const HOST_WIDE_INT *op1,
1025 unsigned int op1len, unsigned int prec)
1027 wide_int result;
1028 int l0 = op0len - 1;
1029 int l1 = op1len - 1;
1030 bool need_canon = true;
1032 unsigned int len = MAX (op0len, op1len);
1033 if (l0 > l1)
1035 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1036 if (op1mask == 0)
1038 l0 = l1;
1039 len = l1 + 1;
1041 else
1043 need_canon = false;
1044 while (l0 > l1)
1046 val[l0] = op0[l0];
1047 l0--;
1051 else if (l1 > l0)
1053 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1054 if (op0mask != 0)
1055 len = l0 + 1;
1056 else
1058 need_canon = false;
1059 while (l1 > l0)
1061 val[l1] = ~op1[l1];
1062 l1--;
1067 while (l0 >= 0)
1069 val[l0] = op0[l0] | ~op1[l0];
1070 l0--;
1073 if (need_canon)
1074 len = canonize (val, len, prec);
1076 return len;
1079 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1080 unsigned int
1081 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1082 unsigned int op0len, const HOST_WIDE_INT *op1,
1083 unsigned int op1len, unsigned int prec)
1085 wide_int result;
1086 int l0 = op0len - 1;
1087 int l1 = op1len - 1;
1089 unsigned int len = MAX (op0len, op1len);
1090 if (l0 > l1)
1092 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1093 while (l0 > l1)
1095 val[l0] = op0[l0] ^ op1mask;
1096 l0--;
1100 if (l1 > l0)
1102 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1103 while (l1 > l0)
1105 val[l1] = op0mask ^ op1[l1];
1106 l1--;
1110 while (l0 >= 0)
1112 val[l0] = op0[l0] ^ op1[l0];
1113 l0--;
1116 return canonize (val, len, prec);
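/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): the logical helpers above, via their
   public wrappers, at 16-bit precision.  */

static inline void
example_logical_ops (void)
{
  wide_int a = wi::shwi (0x0ff0, 16);
  wide_int b = wi::shwi (0x00ff, 16);

  gcc_checking_assert (wi::bit_and (a, b).to_uhwi () == 0x00f0);
  gcc_checking_assert (wi::bit_and_not (a, b).to_uhwi () == 0x0f00);
  gcc_checking_assert (wi::bit_or (a, b).to_uhwi () == 0x0fff);
  gcc_checking_assert (wi::bit_xor (a, b).to_uhwi () == 0x0f0f);
}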
1120 * math
1123 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1124 whether the result overflows when OP0 and OP1 are treated as having
1125 signedness SGN. Return the number of blocks in VAL. */
1126 unsigned int
1127 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1128 unsigned int op0len, const HOST_WIDE_INT *op1,
1129 unsigned int op1len, unsigned int prec,
1130 signop sgn, bool *overflow)
1132 unsigned HOST_WIDE_INT o0 = 0;
1133 unsigned HOST_WIDE_INT o1 = 0;
1134 unsigned HOST_WIDE_INT x = 0;
1135 unsigned HOST_WIDE_INT carry = 0;
1136 unsigned HOST_WIDE_INT old_carry = 0;
1137 unsigned HOST_WIDE_INT mask0, mask1;
1138 unsigned int i;
1140 unsigned int len = MAX (op0len, op1len);
1141 mask0 = -top_bit_of (op0, op0len, prec);
1142 mask1 = -top_bit_of (op1, op1len, prec);
1143 /* Add all of the explicitly defined elements. */
1145 for (i = 0; i < len; i++)
1147 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1148 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1149 x = o0 + o1 + carry;
1150 val[i] = x;
1151 old_carry = carry;
1152 carry = carry == 0 ? x < o0 : x <= o0;
1155 if (len * HOST_BITS_PER_WIDE_INT < prec)
1157 val[len] = mask0 + mask1 + carry;
1158 len++;
1159 if (overflow)
1160 *overflow = false;
1162 else if (overflow)
1164 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1165 if (sgn == SIGNED)
1167 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1168 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1170 else
1172 /* Put the MSBs of X and O0 in the top of the HWI. */
1173 x <<= shift;
1174 o0 <<= shift;
1175 if (old_carry)
1176 *overflow = (x <= o0);
1177 else
1178 *overflow = (x < o0);
1182 return canonize (val, len, prec);
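/* Illustrative sketch (assumes the bool *overflow overload of wi::add
   from this version of wide-int.h; not part of the original file):
   127 + 1 overflows at 8-bit signed precision but not at 8-bit
   unsigned precision.  */

static inline void
example_add_overflow (void)
{
  bool ovf;
  wide_int a = wi::shwi (127, 8);
  wide_int b = wi::shwi (1, 8);

  wi::add (a, b, SIGNED, &ovf);
  gcc_checking_assert (ovf);

  wi::add (a, b, UNSIGNED, &ovf);
  gcc_checking_assert (!ovf);
}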
1185 /* Subroutines of the multiplication and division operations. Unpack
1186 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1187 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1188 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1189 static void
1190 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1191 unsigned int in_len, unsigned int out_len,
1192 unsigned int prec, signop sgn)
1194 unsigned int i;
1195 unsigned int j = 0;
1196 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1197 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1198 HOST_WIDE_INT mask;
1200 if (sgn == SIGNED)
1202 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1203 mask &= HALF_INT_MASK;
1205 else
1206 mask = 0;
1208 for (i = 0; i < blocks_needed - 1; i++)
1210 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1211 result[j++] = x;
1212 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1215 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1216 if (small_prec)
1218 if (sgn == SIGNED)
1219 x = sext_hwi (x, small_prec);
1220 else
1221 x = zext_hwi (x, small_prec);
1223 result[j++] = x;
1224 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1226 /* Smear the sign bit. */
1227 while (j < out_len)
1228 result[j++] = mask;
1231 /* The inverse of wi_unpack. IN_LEN is the number of input
1232 blocks and PRECISION is the precision of the result. Return the
1233 number of blocks in the canonicalized result. */
1234 static unsigned int
1235 wi_pack (HOST_WIDE_INT *result,
1236 const unsigned HOST_HALF_WIDE_INT *input,
1237 unsigned int in_len, unsigned int precision)
1239 unsigned int i = 0;
1240 unsigned int j = 0;
1241 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1243 while (i + 1 < in_len)
1245 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1246 | ((unsigned HOST_WIDE_INT) input[i + 1]
1247 << HOST_BITS_PER_HALF_WIDE_INT));
1248 i += 2;
1251 /* Handle the case where in_len is odd. For this we zero extend. */
1252 if (in_len & 1)
1253 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1254 else if (j < blocks_needed)
1255 result[j++] = 0;
1256 return canonize (result, j, precision);
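/* Illustrative sketch (assumes a 64-bit HOST_WIDE_INT; not part of the
   original file): wi_unpack followed by wi_pack round-trips a value,
   splitting one 64-bit block into two 32-bit half blocks and then
   reassembling them.  */

static inline void
example_pack_round_trip (void)
{
  HOST_WIDE_INT in[1] = { (HOST_WIDE_INT) 0x123456789abcdef0 };
  unsigned HOST_HALF_WIDE_INT half[2];
  HOST_WIDE_INT out[1];

  wi_unpack (half, in, 1, 2, 64, UNSIGNED);
  gcc_checking_assert (wi_pack (out, half, 2, 64) == 1);
  gcc_checking_assert (out[0] == in[0]);
}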
1259 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1260 result is returned.
1262 If HIGH is not set, throw away the upper half after the check is
1263 made to see if it overflows. Unfortunately there is no better way
1264 to check for overflow than to do this. If OVERFLOW is nonnull,
1265 record in *OVERFLOW whether the result overflowed. SGN controls
1266 the signedness and is used to check overflow or if HIGH is set. */
1267 unsigned int
1268 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1269 unsigned int op1len, const HOST_WIDE_INT *op2val,
1270 unsigned int op2len, unsigned int prec, signop sgn,
1271 bool *overflow, bool high)
1273 unsigned HOST_WIDE_INT o0, o1, k, t;
1274 unsigned int i;
1275 unsigned int j;
1276 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1277 unsigned int half_blocks_needed = blocks_needed * 2;
1278 /* The sizes here are scaled to support a 2x largest mode by 2x
1279 largest mode yielding a 4x largest mode result. This is what is
1280 needed by VRP (value range propagation). */
1282 unsigned HOST_HALF_WIDE_INT
1283 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1284 unsigned HOST_HALF_WIDE_INT
1285 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1286 /* The '2' in 'R' is because we are internally doing a full
1287 multiply. */
1288 unsigned HOST_HALF_WIDE_INT
1289 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1290 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1292 /* If the top level routine did not really pass in an overflow, then
1293 just make sure that we never attempt to set it. */
1294 bool needs_overflow = (overflow != 0);
1295 if (needs_overflow)
1296 *overflow = false;
1298 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1299 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1301 /* This is a surprisingly common case, so do it first. */
1302 if (op1 == 0 || op2 == 0)
1304 val[0] = 0;
1305 return 1;
1308 #ifdef umul_ppmm
1309 if (sgn == UNSIGNED)
1311 /* If the inputs are single HWIs and the output has room for at
1312 least two HWIs, we can use umul_ppmm directly. */
1313 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1314 && wi::fits_uhwi_p (op1)
1315 && wi::fits_uhwi_p (op2))
1317 /* This case never overflows. */
1318 if (high)
1320 val[0] = 0;
1321 return 1;
1323 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1324 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1326 val[2] = 0;
1327 return 3;
1329 return 1 + (val[1] != 0 || val[0] < 0);
1331 /* Likewise if the output is a full single HWI, except that the
1332 upper HWI of the result is only used for determining overflow.
1333 (We handle this case inline when overflow isn't needed.) */
1334 else if (prec == HOST_BITS_PER_WIDE_INT)
1336 unsigned HOST_WIDE_INT upper;
1337 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1338 if (needs_overflow)
1339 *overflow = (upper != 0);
1340 if (high)
1341 val[0] = upper;
1342 return 1;
1345 #endif
1347 /* Handle multiplications by 1. */
1348 if (op1 == 1)
1350 if (high)
1352 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1353 return 1;
1355 for (i = 0; i < op2len; i++)
1356 val[i] = op2val[i];
1357 return op2len;
1359 if (op2 == 1)
1361 if (high)
1363 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1364 return 1;
1366 for (i = 0; i < op1len; i++)
1367 val[i] = op1val[i];
1368 return op1len;
1371 /* If we need to check for overflow, we can only do half wide
1372 multiplies quickly because we need to look at the top bits to
1373 check for the overflow. */
1374 if ((high || needs_overflow)
1375 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1377 unsigned HOST_WIDE_INT r;
1379 if (sgn == SIGNED)
1381 o0 = op1.to_shwi ();
1382 o1 = op2.to_shwi ();
1384 else
1386 o0 = op1.to_uhwi ();
1387 o1 = op2.to_uhwi ();
1390 r = o0 * o1;
1391 if (needs_overflow)
1393 if (sgn == SIGNED)
1395 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1396 *overflow = true;
1398 else
1400 if ((r >> prec) != 0)
1401 *overflow = true;
1404 val[0] = high ? r >> prec : r;
1405 return 1;
1408 /* We do unsigned mul and then correct it. */
1409 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1410 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1412 /* The 2 is for a full mult. */
1413 memset (r, 0, half_blocks_needed * 2
1414 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1416 for (j = 0; j < half_blocks_needed; j++)
1418 k = 0;
1419 for (i = 0; i < half_blocks_needed; i++)
1421 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1422 + r[i + j] + k);
1423 r[i + j] = t & HALF_INT_MASK;
1424 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1426 r[j + half_blocks_needed] = k;
1429 /* We did unsigned math above. For signed we must adjust the
1430 product (assuming we need to see that). */
1431 if (sgn == SIGNED && (high || needs_overflow))
1433 unsigned HOST_WIDE_INT b;
1434 if (wi::neg_p (op1))
1436 b = 0;
1437 for (i = 0; i < half_blocks_needed; i++)
1439 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1440 - (unsigned HOST_WIDE_INT)v[i] - b;
1441 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1442 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1445 if (wi::neg_p (op2))
1447 b = 0;
1448 for (i = 0; i < half_blocks_needed; i++)
1450 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1451 - (unsigned HOST_WIDE_INT)u[i] - b;
1452 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1453 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1458 if (needs_overflow)
1460 HOST_WIDE_INT top;
1462 /* For unsigned, overflow is true if any of the top bits are set.
1463 For signed, overflow is true if any of the top bits are not equal
1464 to the sign bit. */
1465 if (sgn == UNSIGNED)
1466 top = 0;
1467 else
1469 top = r[(half_blocks_needed) - 1];
1470 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1471 top &= mask;
1474 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1475 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1476 *overflow = true;
1479 int r_offset = high ? half_blocks_needed : 0;
1480 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
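/* Illustrative sketch (assumes the bool *overflow overload of wi::mul
   and the wi::mul_high wrapper; not part of the original file): at
   8-bit precision 16 * 16 overflows, and the upper half of the full
   16-bit product is 1.  */

static inline void
example_mul_overflow (void)
{
  bool ovf;
  wide_int a = wi::shwi (16, 8);

  wi::mul (a, a, UNSIGNED, &ovf);
  gcc_checking_assert (ovf);
  gcc_checking_assert (wi::mul_high (a, a, UNSIGNED).to_uhwi () == 1);
}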
1483 /* Compute the population count of X. */
1485 wi::popcount (const wide_int_ref &x)
1487 unsigned int i;
1488 int count;
1490 /* The high order block is special if it is the last block and the
1491 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1492 have to clear out any ones above the precision before doing
1493 popcount on this block. */
1494 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1495 unsigned int stop = x.len;
1496 if (count < 0)
1498 count = popcount_hwi (x.uhigh () << -count);
1499 stop -= 1;
1501 else
1503 if (x.sign_mask () >= 0)
1504 count = 0;
1507 for (i = 0; i < stop; ++i)
1508 count += popcount_hwi (x.val[i]);
1510 return count;
1513 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1514 whether the result overflows when OP0 and OP1 are treated as having
1515 signedness SGN. Return the number of blocks in VAL. */
1516 unsigned int
1517 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1518 unsigned int op0len, const HOST_WIDE_INT *op1,
1519 unsigned int op1len, unsigned int prec,
1520 signop sgn, bool *overflow)
1522 unsigned HOST_WIDE_INT o0 = 0;
1523 unsigned HOST_WIDE_INT o1 = 0;
1524 unsigned HOST_WIDE_INT x = 0;
1525 /* We implement subtraction as an in place negate and add. Negation
1526 is just inversion and add 1, so we can do the add of 1 by just
1527 starting the borrow in of the first element at 1. */
1528 unsigned HOST_WIDE_INT borrow = 0;
1529 unsigned HOST_WIDE_INT old_borrow = 0;
1531 unsigned HOST_WIDE_INT mask0, mask1;
1532 unsigned int i;
1534 unsigned int len = MAX (op0len, op1len);
1535 mask0 = -top_bit_of (op0, op0len, prec);
1536 mask1 = -top_bit_of (op1, op1len, prec);
1538 /* Subtract all of the explicitly defined elements. */
1539 for (i = 0; i < len; i++)
1541 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1542 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1543 x = o0 - o1 - borrow;
1544 val[i] = x;
1545 old_borrow = borrow;
1546 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1549 if (len * HOST_BITS_PER_WIDE_INT < prec)
1551 val[len] = mask0 - mask1 - borrow;
1552 len++;
1553 if (overflow)
1554 *overflow = false;
1556 else if (overflow)
1558 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1559 if (sgn == SIGNED)
1561 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1562 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1564 else
1566 /* Put the MSBs of X and O0 in the top of the HWI. */
1567 x <<= shift;
1568 o0 <<= shift;
1569 if (old_borrow)
1570 *overflow = (x >= o0);
1571 else
1572 *overflow = (x > o0);
1576 return canonize (val, len, prec);
1581 * Division and Mod
1584 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1585 algorithm is a small modification of the algorithm in Hacker's
1586 Delight by Warren, which itself is a small modification of Knuth's
1587 algorithm. M is the number of significant elements of U; however,
1588 there needs to be at least one extra element of B_DIVIDEND
1589 allocated. N is the number of elements of B_DIVISOR. */
1590 static void
1591 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1592 unsigned HOST_HALF_WIDE_INT *b_remainder,
1593 unsigned HOST_HALF_WIDE_INT *b_dividend,
1594 unsigned HOST_HALF_WIDE_INT *b_divisor,
1595 int m, int n)
1597 /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of a
1598 HOST_WIDE_INT, and are stored in the lower bits of each word. This
1599 algorithm should work properly on both 32 and 64 bit
1600 machines. */
1601 unsigned HOST_WIDE_INT b
1602 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1603 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1604 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1605 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1606 HOST_WIDE_INT t, k;
1607 int i, j, s;
1609 /* Single digit divisor. */
1610 if (n == 1)
1612 k = 0;
1613 for (j = m - 1; j >= 0; j--)
1615 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1616 k = ((k * b + b_dividend[j])
1617 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1618 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1620 b_remainder[0] = k;
1621 return;
1624 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1626 if (s)
1628 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1629 algorithm, we can overwrite b_dividend and b_divisor, so we do
1630 that. */
1631 for (i = n - 1; i > 0; i--)
1632 b_divisor[i] = (b_divisor[i] << s)
1633 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1634 b_divisor[0] = b_divisor[0] << s;
1636 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1637 for (i = m - 1; i > 0; i--)
1638 b_dividend[i] = (b_dividend[i] << s)
1639 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1640 b_dividend[0] = b_dividend[0] << s;
1643 /* Main loop. */
1644 for (j = m - n; j >= 0; j--)
1646 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1647 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1648 again:
1649 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1651 qhat -= 1;
1652 rhat += b_divisor[n-1];
1653 if (rhat < b)
1654 goto again;
1657 /* Multiply and subtract. */
1658 k = 0;
1659 for (i = 0; i < n; i++)
1661 p = qhat * b_divisor[i];
1662 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1663 b_dividend[i + j] = t;
1664 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1665 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1667 t = b_dividend[j+n] - k;
1668 b_dividend[j+n] = t;
1670 b_quotient[j] = qhat;
1671 if (t < 0)
1673 b_quotient[j] -= 1;
1674 k = 0;
1675 for (i = 0; i < n; i++)
1677 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1678 b_dividend[i+j] = t;
1679 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1681 b_dividend[j+n] += k;
1684 if (s)
1685 for (i = 0; i < n; i++)
1686 b_remainder[i] = (b_dividend[i] >> s)
1687 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1688 else
1689 for (i = 0; i < n; i++)
1690 b_remainder[i] = b_dividend[i];
1694 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1695 the result. If QUOTIENT is nonnull, store the value of the quotient
1696 there and return the number of blocks in it. The return value is
1697 not defined otherwise. If REMAINDER is nonnull, store the value
1698 of the remainder there and store the number of blocks in
1699 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1700 the division overflowed. */
1701 unsigned int
1702 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1703 HOST_WIDE_INT *remainder,
1704 const HOST_WIDE_INT *dividend_val,
1705 unsigned int dividend_len, unsigned int dividend_prec,
1706 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1707 unsigned int divisor_prec, signop sgn,
1708 bool *oflow)
1710 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1711 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1712 unsigned HOST_HALF_WIDE_INT
1713 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714 unsigned HOST_HALF_WIDE_INT
1715 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1716 unsigned HOST_HALF_WIDE_INT
1717 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1718 unsigned HOST_HALF_WIDE_INT
1719 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1720 unsigned int m, n;
1721 bool dividend_neg = false;
1722 bool divisor_neg = false;
1723 bool overflow = false;
1724 wide_int neg_dividend, neg_divisor;
1726 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1727 dividend_prec);
1728 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1729 divisor_prec);
1730 if (divisor == 0)
1731 overflow = true;
1733 /* The smallest signed number / -1 causes overflow. The dividend_len
1734 check is for speed rather than correctness. */
1735 if (sgn == SIGNED
1736 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1737 && divisor == -1
1738 && wi::only_sign_bit_p (dividend))
1739 overflow = true;
1741 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1742 (signed min / -1) has the same representation as the original dividend.
1743 We have traditionally made division by zero act as division by one,
1744 so there too we use the original dividend. */
1745 if (overflow)
1747 if (remainder)
1749 *remainder_len = 1;
1750 remainder[0] = 0;
1752 if (oflow != 0)
1753 *oflow = true;
1754 if (quotient)
1755 for (unsigned int i = 0; i < dividend_len; ++i)
1756 quotient[i] = dividend_val[i];
1757 return dividend_len;
1760 if (oflow)
1761 *oflow = false;
1763 /* Do it on the host if you can. */
1764 if (sgn == SIGNED
1765 && wi::fits_shwi_p (dividend)
1766 && wi::fits_shwi_p (divisor))
1768 HOST_WIDE_INT o0 = dividend.to_shwi ();
1769 HOST_WIDE_INT o1 = divisor.to_shwi ();
1771 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1773 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1774 if (quotient)
1776 quotient[0] = HOST_WIDE_INT_MIN;
1777 quotient[1] = 0;
1779 if (remainder)
1781 remainder[0] = 0;
1782 *remainder_len = 1;
1784 return 2;
1786 else
1788 if (quotient)
1789 quotient[0] = o0 / o1;
1790 if (remainder)
1792 remainder[0] = o0 % o1;
1793 *remainder_len = 1;
1795 return 1;
1799 if (sgn == UNSIGNED
1800 && wi::fits_uhwi_p (dividend)
1801 && wi::fits_uhwi_p (divisor))
1803 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1804 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1805 unsigned int quotient_len = 1;
1807 if (quotient)
1809 quotient[0] = o0 / o1;
1810 quotient_len = canonize_uhwi (quotient, dividend_prec);
1812 if (remainder)
1814 remainder[0] = o0 % o1;
1815 *remainder_len = canonize_uhwi (remainder, dividend_prec);
1817 return quotient_len;
1820 /* Make the divisor and dividend positive and remember what we
1821 did. */
1822 if (sgn == SIGNED)
1824 if (wi::neg_p (dividend))
1826 neg_dividend = -dividend;
1827 dividend = neg_dividend;
1828 dividend_neg = true;
1830 if (wi::neg_p (divisor))
1832 neg_divisor = -divisor;
1833 divisor = neg_divisor;
1834 divisor_neg = true;
1838 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1839 dividend_blocks_needed, dividend_prec, sgn);
1840 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1841 divisor_blocks_needed, divisor_prec, sgn);
1843 m = dividend_blocks_needed;
1844 b_dividend[m] = 0;
1845 while (m > 1 && b_dividend[m - 1] == 0)
1846 m--;
1848 n = divisor_blocks_needed;
1849 while (n > 1 && b_divisor[n - 1] == 0)
1850 n--;
1852 memset (b_quotient, 0, sizeof (b_quotient));
1854 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1856 unsigned int quotient_len = 0;
1857 if (quotient)
1859 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1860 /* The quotient is neg if exactly one of the divisor or dividend is
1861 neg. */
1862 if (dividend_neg != divisor_neg)
1863 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1864 quotient_len, dividend_prec,
1865 UNSIGNED, 0);
1868 if (remainder)
1870 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1871 /* The remainder is always the same sign as the dividend. */
1872 if (dividend_neg)
1873 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1874 *remainder_len, dividend_prec,
1875 UNSIGNED, 0);
1878 return quotient_len;
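/* Illustrative sketch (not part of the original file; wi::divmod_trunc
   is the public wrapper around this routine): truncating division of
   -7 by 2 gives quotient -3 and remainder -1; as noted above, the
   remainder keeps the sign of the dividend.  */

static inline void
example_divmod (void)
{
  wide_int d = wi::shwi (-7, 32);
  wide_int two = wi::shwi (2, 32);
  wide_int rem;

  wide_int q = wi::divmod_trunc (d, two, SIGNED, &rem);
  gcc_checking_assert (q.to_shwi () == -3);
  gcc_checking_assert (rem.to_shwi () == -1);
}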
1882 * Shifting, rotating and extraction.
1885 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1886 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1887 unsigned int
1888 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1889 unsigned int xlen, unsigned int precision,
1890 unsigned int shift)
1892 /* Split the shift into a whole-block shift and a subblock shift. */
1893 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1894 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1896 /* The whole-block shift fills with zeros. */
1897 unsigned int len = BLOCKS_NEEDED (precision);
1898 for (unsigned int i = 0; i < skip; ++i)
1899 val[i] = 0;
1901 /* It's easier to handle the simple block case specially. */
1902 if (small_shift == 0)
1903 for (unsigned int i = skip; i < len; ++i)
1904 val[i] = safe_uhwi (xval, xlen, i - skip);
1905 else
1907 /* The first unfilled output block is a left shift of the first
1908 block in XVAL. The other output blocks contain bits from two
1909 consecutive input blocks. */
1910 unsigned HOST_WIDE_INT carry = 0;
1911 for (unsigned int i = skip; i < len; ++i)
1913 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1914 val[i] = (x << small_shift) | carry;
1915 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1918 return canonize (val, len, precision);
1921 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1922 number of blocks in VAL. The input has XPRECISION bits and the
1923 output has XPRECISION - SHIFT bits. */
1924 static unsigned int
1925 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1926 unsigned int xlen, unsigned int xprecision,
1927 unsigned int shift)
1929 /* Split the shift into a whole-block shift and a subblock shift. */
1930 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1931 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1933 /* Work out how many blocks are needed to store the significant bits
1934 (excluding the upper zeros or signs). */
1935 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1937 /* It's easier to handle the simple block case specially. */
1938 if (small_shift == 0)
1939 for (unsigned int i = 0; i < len; ++i)
1940 val[i] = safe_uhwi (xval, xlen, i + skip);
1941 else
1943 /* Each output block but the last is a combination of two input blocks.
1944 The last block is a right shift of the last block in XVAL. */
1945 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1946 for (unsigned int i = 0; i < len; ++i)
1948 val[i] = curr >> small_shift;
1949 curr = safe_uhwi (xval, xlen, i + skip + 1);
1950 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1953 return len;
1956 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1957 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1958 VAL has PRECISION bits. */
1959 unsigned int
1960 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1961 unsigned int xlen, unsigned int xprecision,
1962 unsigned int precision, unsigned int shift)
1964 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1966 /* The value we just created has precision XPRECISION - SHIFT.
1967 Zero-extend it to wider precisions. */
1968 if (precision > xprecision - shift)
1970 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1971 if (small_prec)
1972 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1973 else if (val[len - 1] < 0)
1975 /* Add a new block with a zero. */
1976 val[len++] = 0;
1977 return len;
1980 return canonize (val, len, precision);
1983 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1984 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1985 VAL has PRECISION bits. */
1986 unsigned int
1987 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1988 unsigned int xlen, unsigned int xprecision,
1989 unsigned int precision, unsigned int shift)
1991 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1993 /* The value we just created has precision XPRECISION - SHIFT.
1994 Sign-extend it to wider types. */
1995 if (precision > xprecision - shift)
1997 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1998 if (small_prec)
1999 val[len - 1] = sext_hwi (val[len - 1], small_prec);
2001 return canonize (val, len, precision);
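/* Illustrative sketch (not part of the original file): the public shift
   wrappers at 8-bit precision.  Logical and arithmetic right shifts
   differ only when the sign bit is set.  */

static inline void
example_shifts (void)
{
  wide_int one = wi::shwi (1, 8);
  wide_int x = wi::shwi (-128, 8);	/* 0x80.  */

  gcc_checking_assert (wi::lshift (one, 3).to_uhwi () == 8);
  gcc_checking_assert (wi::lrshift (x, 4).to_uhwi () == 0x08);
  gcc_checking_assert (wi::arshift (x, 4).to_shwi () == -8);
}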
2004 /* Return the number of leading (upper) zeros in X. */
2006 wi::clz (const wide_int_ref &x)
2008 /* Calculate how many bits there are above the highest represented block. */
2009 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2011 unsigned HOST_WIDE_INT high = x.uhigh ();
2012 if (count < 0)
2013 /* The upper -COUNT bits of HIGH are not part of the value.
2014 Clear them out. */
2015 high = (high << -count) >> -count;
2016 else if (x.sign_mask () < 0)
2017 /* The upper bit is set, so there are no leading zeros. */
2018 return 0;
2020 /* We don't need to look below HIGH. Either HIGH is nonzero,
2021 or the top bit of the block below is nonzero; clz_hwi is
2022 HOST_BITS_PER_WIDE_INT in the latter case. */
2023 return count + clz_hwi (high);
2026 /* Return the number of redundant sign bits in X. (That is, the number
2027 of bits immediately below the sign bit that have the same value as
2028 the sign bit.) */
2030 wi::clrsb (const wide_int_ref &x)
2032 /* Calculate how many bits there are above the highest represented block. */
2033 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2035 unsigned HOST_WIDE_INT high = x.uhigh ();
2036 unsigned HOST_WIDE_INT mask = -1;
2037 if (count < 0)
2039 /* The upper -COUNT bits of HIGH are not part of the value.
2040 Clear them from both MASK and HIGH. */
2041 mask >>= -count;
2042 high &= mask;
2045 /* If the top bit is 1, count the number of leading 1s. If the top
2046 bit is zero, count the number of leading zeros. */
2047 if (high > mask / 2)
2048 high ^= mask;
2050 /* There are no sign bits below the top block, so we don't need to look
2051 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2052 HIGH is 0. */
2053 return count + clz_hwi (high) - 1;
2056 /* Return the number of trailing (lower) zeros in X. */
2058 wi::ctz (const wide_int_ref &x)
2060 if (x.len == 1 && x.ulow () == 0)
2061 return x.precision;
2063 /* Having dealt with the zero case, there must be a block with a
2064 nonzero bit. We don't care about the bits above the first 1. */
2065 unsigned int i = 0;
2066 while (x.val[i] == 0)
2067 ++i;
2068 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2071 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2072 return -1. */
2074 wi::exact_log2 (const wide_int_ref &x)
2076 /* Reject cases where there are implicit -1 blocks above HIGH. */
2077 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2078 return -1;
2080 /* Set CRUX to the index of the entry that should be nonzero.
2081 If the top block is zero then the next lowest block (if any)
2082 must have the high bit set. */
2083 unsigned int crux = x.len - 1;
2084 if (crux > 0 && x.val[crux] == 0)
2085 crux -= 1;
2087 /* Check that all lower blocks are zero. */
2088 for (unsigned int i = 0; i < crux; ++i)
2089 if (x.val[i] != 0)
2090 return -1;
2092 /* Get a zero-extended form of block CRUX. */
2093 unsigned HOST_WIDE_INT hwi = x.val[crux];
2094 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2095 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2097 /* Now it's down to whether HWI is a power of 2. */
2098 int res = ::exact_log2 (hwi);
2099 if (res >= 0)
2100 res += crux * HOST_BITS_PER_WIDE_INT;
2101 return res;
2104 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2106 wi::floor_log2 (const wide_int_ref &x)
2108 return x.precision - 1 - clz (x);
2111 /* Return the index of the first (lowest) set bit in X, counting from 1.
2112 Return 0 if X is 0. */
2114 wi::ffs (const wide_int_ref &x)
2116 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2119 /* Return true if sign-extending X to have precision PRECISION would give
2120 the minimum signed value at that precision. */
2121 bool
2122 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2124 return ctz (x) + 1 == int (precision);
2127 /* Return true if X represents the minimum signed value. */
2128 bool
2129 wi::only_sign_bit_p (const wide_int_ref &x)
2131 return only_sign_bit_p (x, x.precision);
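/* Illustrative sketch (not part of the original file; the function name
   is made up for the example): the bit-scanning helpers above, for the
   32-bit value 0x100.  */

static inline void
example_bit_scans (void)
{
  wide_int x = wi::shwi (0x100, 32);

  gcc_checking_assert (wi::clz (x) == 23);
  gcc_checking_assert (wi::ctz (x) == 8);
  gcc_checking_assert (wi::exact_log2 (x) == 8);
  gcc_checking_assert (wi::floor_log2 (x) == 8);
  gcc_checking_assert (wi::ffs (x) == 9);
  gcc_checking_assert (wi::popcount (x) == 1);
  gcc_checking_assert (wi::only_sign_bit_p (wi::min_value (32, SIGNED)));
}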
2135 * Private utilities.
2138 void gt_ggc_mx (widest_int *) { }
2139 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2140 void gt_pch_nx (widest_int *) { }
2142 template void wide_int::dump () const;
2143 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2144 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2145 template void offset_int::dump () const;
2146 template void widest_int::dump () const;