/* Operations with very long integers.
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "selftest.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif
#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT

/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
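
/* Worked examples (not part of the original file), assuming
   HOST_BITS_PER_WIDE_INT == 64: BLOCKS_NEEDED (1) == 1,
   BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2, and
   BLOCKS_NEEDED (0) == 1 (a degenerate precision still gets one block).
   SIGN_MASK (X) is -1 when the top bit of X is set and 0 otherwise,
   e.g. SIGN_MASK (HOST_WIDE_INT_M1) == -1 and SIGN_MASK (42) == 0.  */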

/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block I does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
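
/* An illustrative sketch (not part of the original file), assuming
   HOST_BITS_PER_WIDE_INT == 64: canonize drops blocks that are implied
   by sign extension of the block below them.  */
#if 0
static void
canonize_example ()
{
  /* { 5, 0 } at precision 128: the upper zero block is redundant
     because bit 63 of the low block is clear.  */
  HOST_WIDE_INT a[2] = { 5, 0 };
  gcc_assert (canonize (a, 2, 128) == 1);

  /* { -1, 0 } at precision 128 needs both blocks: the explicit zero
     block distinguishes 0xffffffffffffffff from -1.  */
  HOST_WIDE_INT b[2] = { -1, 0 };
  gcc_assert (canonize (b, 2, 128) == 2);
}
#endif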

/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}
/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
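
/* A minimal usage sketch (illustrative, not part of the original file):
   building a 16-bit wide_int from two target bytes.  On a little-endian
   target with 8-bit units the result below is 0x0102.  */
#if 0
static wide_int
from_buffer_example ()
{
  unsigned char bytes[2] = { 0x02, 0x01 };
  return wi::from_buffer (bytes, 2);	/* Precision 2 * BITS_PER_UNIT.  */
}
#endif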

/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the -0x80..0 edge case that
	 negation won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
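
/* A hedged round-trip sketch (illustrative only, not part of the
   original file): converting through GMP and back preserves the value
   when it fits the type.  INT32_TYPE is assumed to be a 32-bit signed
   integer type node supplied by the caller.  */
#if 0
static bool
mpz_round_trip (const_tree int32_type)
{
  wide_int w = wi::shwi (-42, 32);
  mpz_t m;
  mpz_init (m);
  wi::to_mpz (w, m, SIGNED);
  wide_int back = wi::from_mpz (int32_type, m, true);
  mpz_clear (m);
  return wi::eq_p (back, -42);
}
#endif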

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
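
/* Worked values for an 8-bit precision (illustrative, not part of the
   original file): the signed max is mask (7, false, 8) == 127, the
   signed min is set_bit_in_zero (7, 8) == -128, and the unsigned max
   is shwi (-1, 8), i.e. 0xff == 255.  */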

/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
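
/* A worked example of the extension cases (illustrative, not part of
   the original file), assuming HOST_BITS_PER_WIDE_INT == 64.  Take
   XVAL = { 0xff } with XPRECISION = 8:
   - force_to_size to precision 32 with UNSIGNED zero-extends the
     partial top block, giving the value 255;
   - force_to_size to precision 32 with SIGNED sign-extends it,
     giving { -1 }, i.e. 0xffffffff at precision 32.  */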

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
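
/* Illustrative values (not part of the original file), assuming
   HOST_BITS_PER_WIDE_INT == 64 and a 128-bit precision: for
   XVAL = { 0xff }, sext_large with OFFSET 4 yields -1 (bit 3 is
   copied upwards), while zext_large with OFFSET 4 yields 0xf
   (everything from bit 4 up is cleared).  */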

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
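
/* A minimal usage sketch (illustrative, not part of the original file):
   splicing a byte into a 32-bit value.  */
#if 0
static wide_int
insert_example ()
{
  wide_int x = wi::shwi (0x12345678, 32);
  wide_int y = wi::shwi (0xa, 32);
  /* Replace bits [8,16) of X with the low 8 bits of Y:
     the result is 0x12340a78.  */
  return wi::insert (x, y, 8, 8);
}
#endif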

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
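
/* Worked examples (illustrative, not part of the original file): with
   PREC = 32, wi::shifted_mask (4, 8, false, 32) sets bits [4,12),
   giving 0x00000ff0, and wi::shifted_mask (4, 8, true, 32) is its
   complement, 0xfffff00f.  */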

/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * math
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && carry);
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
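
/* A hedged overflow sketch (illustrative only, not part of the
   original file): the public wi::add wrapper reports the carry/sign
   conditions computed above.  Assumes the wi::add overload that takes
   a bool overflow flag.  */
#if 0
static bool
add_overflow_example ()
{
  wide_int a = wi::uhwi (200, 8);
  wide_int b = wi::uhwi (100, 8);
  bool overflow;
  /* 200 + 100 does not fit in 8 unsigned bits, so OVERFLOW is set
     and the stored result wraps to 300 - 256 == 44.  */
  wide_int sum = wi::add (a, b, UNSIGNED, &overflow);
  return overflow && wi::eq_p (sum, 44);
}
#endif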

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
		     | ((unsigned HOST_WIDE_INT) input[i + 1]
			<< HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}

/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow);
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}

/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;	/* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;	/* A remainder.  */
  unsigned HOST_WIDE_INT p;	/* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}

/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
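
/* Worked signed examples (illustrative, not part of the original
   file): division here truncates towards zero, so at precision 32,
   -7 / 2 gives quotient -3 and remainder -1, while 7 / -2 gives
   quotient -3 and remainder 1; the remainder always takes the sign
   of the dividend, as noted above.  */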

/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
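
/* An illustrative sketch (not part of the original file), assuming
   HOST_BITS_PER_WIDE_INT == 64: a shift that straddles a block
   boundary at precision 128.  */
#if 0
static void
lshift_example ()
{
  /* Shifting the single block 0xff left by 60 spills four bits into
     a second block: { 0xf000000000000000, 0xf }.  */
  HOST_WIDE_INT val[2];
  HOST_WIDE_INT x[1] = { 0xff };
  unsigned int len = wi::lshift_large (val, x, 1, 128, 60);
  gcc_assert (len == 2 && val[1] == 0xf);
}
#endif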

/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}

/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero.  */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}

/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}

/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}

/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}

/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
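
/* Worked example for wi::ctz above (illustrative): for a 128-bit value
   with blocks {0, 1} (i.e. 1 << 64), the loop skips the zero low
   block and the result is 1 * 64 + ctz_hwi (1) = 64.  */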

/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
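
/* Worked example for wi::exact_log2 above (illustrative): for the
   128-bit value 1 << 64 (blocks {0, 1}), CRUX is 1, the lower block
   is zero, and ::exact_log2 (1) is 0, so the result is
   0 + 1 * 64 = 64.  A non-power such as 0x30 fails at ::exact_log2
   and yields -1.  */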

/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
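
/* Worked example for wi::floor_log2 above (illustrative): for the
   128-bit value 5, clz is 125, so the result is 128 - 1 - 125 = 2,
   the position of the highest set bit.  */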

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
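
/* Note on only_sign_bit_p above (illustrative): sign extension only
   replicates the top bit, so X extends to the minimum signed value
   exactly when its lowest set bit is bit PRECISION - 1; hence the
   ctz (x) + 1 == precision test.  For example, at 32-bit precision
   only 0x80000000 qualifies among 32-bit values.  */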

/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
                                  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  */
  return (val & mask) | (mask & lower_mask);
}
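
/* Worked example for wi::round_down_for_mask above (illustrative),
   using the (24, 0xf1) case from its comment:

     extra_bits = 24 & ~0xf1                  = 0b01000 = 8
     lower_mask = bits 3..0                   = 0xf
     result     = (24 & 0xf1) | (0xf1 & 0xf)  = 16 | 1 = 17.  */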

/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
                                  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  return (val | tmp) & -tmp;
}
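
/* Worked example for wi::round_up_for_mask above (illustrative),
   using the (24, 0xf1) case from its comment:

     extra_bits = 24 & ~0xf1                  = 8
     upper_mask = (ones above bit 3) & 0xf1   = 0xf0
     tmp        = ~24 & 0xf0                  = 0xe0
     result     = (24 | 0xe0) & -0xe0 = 0xf8 & ...f20 = 0x20 = 32.  */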

/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases.  Besides, you can always
   call the dump method directly.  */

DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const wide_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const widest_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
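
/* Illustrative usage (a sketch, not exercised by the build): the
   DEBUG_FUNCTION overloads above are intended to be called by hand
   from a debugger session, e.g.

     (gdb) call debug (x)
     (gdb) call debug (&x)

   for a wide_int or widest_int X in scope.  */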

#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */

template <class VALUE_TYPE>
static VALUE_TYPE
from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */

template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int.  */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int.  */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}

/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (wi, buf, sgn);
  ASSERT_STREQ (expected, buf);
}

/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_hex (wi, buf);
  ASSERT_STREQ (expected, buf);
}

/* Test cases.  */

/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
                    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
                    wi::lshift (1, 129) + wi::lshift (1, 74)
                    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}

/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax and overloaded
   operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}

/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}

/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}

/* Test overflow conditions.  */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
        int prec = precs[i];
        int offset = offsets[j];
        bool overflow;
        wide_int sum, diff;

        sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow, offset == 0);

        sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow, offset == 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED),
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, -offset);
        ASSERT_EQ (overflow, offset != 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED) - 1,
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, 1 - offset);
        ASSERT_EQ (overflow, offset > 1);
      }
}
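
/* Note on test_overflow above (illustrative): MAX - OFFSET + 1 wraps
   to -OFFSET and sets the overflow flag only when OFFSET is 0, hence
   ASSERT_EQ (overflow, offset == 0); the subtraction cases wrap
   exactly when the true result would be negative, hence the expected
   flags offset != 0 and offset > 1 respectively.  */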

/* Test the round_{down,up}_for_mask functions.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
                                         wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
                                             wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
                                           wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
                                           wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
                                         wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
                                           wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
                                       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
                                       wi::shwi (0xabc, prec)));
}

/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
}

} // namespace selftest

#endif /* CHECKING_P */