gcc/wide-int.cc
/* Operations with very long integers.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
#include "tree.h"
#include "dumpfile.h"

/* This is the maximal size of the buffer needed for dump.  */
const unsigned int MAX_SIZE = (4 * (MAX_BITSIZE_MODE_ANY_INT / 4
                                    + (MAX_BITSIZE_MODE_ANY_INT
                                       / HOST_BITS_PER_WIDE_INT)
                                    + 32));

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
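
/* For illustration (assuming 64-bit HOST_WIDE_INTs): BLOCKS_NEEDED (0) == 1,
   BLOCKS_NEEDED (64) == 1 and BLOCKS_NEEDED (65) == 2, while
   SIGN_MASK (0x8000000000000000) == -1 and SIGN_MASK (1) == 0.  */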

/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   depending on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT) -1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block I does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
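
/* A worked example of the canonical form (assuming 64-bit blocks): at
   PRECISION 128 the value 1 may arrive as the blocks { 1, 0 }.  The upper
   block is merely the sign extension of the lower one, so canonize
   compresses it away and returns length 1.  By contrast { 0, 1 },
   i.e. 2^64, genuinely needs both blocks and keeps length 2.  */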

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower LEN bytes of the result are set; the remaining high
   bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
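
/* Usage sketch (assuming a little-endian target with 8-byte words):
   given unsigned char buf[2] = { 0x34, 0x12 }, wi::from_buffer (buf, 2)
   yields the 16-bit wide_int 0x1234.  */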

/* Set RESULT from X; the sign is taken according to SGN.  */
void
wi::to_mpz (wide_int x, mpz_t result, signop sgn)
{
  bool negative = false;
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int small_prec = x.get_precision () & (HOST_BITS_PER_WIDE_INT - 1);

  if (wi::neg_p (x, sgn))
    {
      negative = true;
      /* We use ones' complement here to sidestep the most negative
         value (0x80...0), which unary minus cannot represent.  */
      x = ~x;
    }

  if (sgn == UNSIGNED && small_prec)
    {
      HOST_WIDE_INT t[WIDE_INT_MAX_ELTS];

      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = zext_hwi (v[len - 1], small_prec);
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);

  if (negative)
    mpz_com (result, result);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  if (precision == 0)
    return shwi (0, precision);
  else if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  if (precision == 0 || sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
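
/* For example, at precision 8: wi::max_value (8, UNSIGNED) is 0xff,
   wi::max_value (8, SIGNED) is 0x7f (mask (7, false, 8)), and
   wi::min_value (8, SIGNED) is -128 (set_bit_in_zero (7, 8)).  */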

/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed,
      unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT);
  gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT);

  if (width == prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
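
/* For illustration (assuming 64-bit blocks): wi::mask (val, 70, false, 128)
   stores val[0] = -1 (sixty-four ones) and val[1] = 0x3f (six more ones)
   and returns 2; with NEGATE true both blocks are inverted.  */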

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000: the mask ends in the block it starts in.  */
          block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
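
/* For illustration: wi::shifted_mask (val, 8, 8, false, 64) takes the
   same-block branch above and stores the single block val[0] = 0xff00,
   returning 1.  */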

/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * Math.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
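
/* The signed overflow test above is the usual sign-bit rule: after the
   top block is shifted so that bit PREC - 1 becomes the MSB,
   (val ^ o0) & (val ^ o1) is negative exactly when O0 and O1 have the
   same sign but the sum's sign differs.  For example, at precision 64,
   0x7fffffffffffffff + 1 gives 0x8000000000000000 and *OVERFLOW is
   set.  */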

/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
clz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}

/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
ctz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
           const unsigned HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < in_len; i++)
    {
      HOST_WIDE_INT x = input[i];
      if (i == blocks_needed - 1 && small_prec)
        {
          if (sgn == SIGNED)
            x = sext_hwi (x, small_prec);
          else
            x = zext_hwi (x, small_prec);
        }
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT) input[i]
        | ((unsigned HOST_WIDE_INT) input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT) input[i]
      | ((unsigned HOST_WIDE_INT) input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
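
/* For illustration (on a host with 64-bit HOST_WIDE_INTs and 32-bit
   half-blocks): wi_unpack splits each block into two half-blocks, low
   half first, so the single block 0x0000000100000002 becomes
   { 0x2, 0x1 }; wi_pack reassembles exactly that pair back into one
   block.  */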

/* Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
   result is returned.  If FULL is set, the entire result is returned
   in a mode that is twice the width of the inputs.  However, that
   mode needs to exist if the value is to be usable.  Clients that use
   FULL need to check for this.

   If HIGH or FULL are not set, throw away the upper half after the check
   is made to see if it overflows.  Unfortunately there is no better
   way to check for overflow than to do this.  OVERFLOW is assumed to
   be sticky so it should be initialized.  SGN controls the signedness
   and is used to check overflow or if HIGH or FULL is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1,
                  unsigned int op1len, const HOST_WIDE_INT *op2,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high, bool full)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vrp.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || full || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = sext_hwi (op1[0], prec);
          o1 = sext_hwi (op2[0], prec);
        }
      else
        {
          o0 = zext_hwi (op1[0], prec);
          o1 = zext_hwi (op2[0], prec);
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, (const unsigned HOST_WIDE_INT *) op1, op1len,
             half_blocks_needed, prec, SIGNED);
  wi_unpack (v, (const unsigned HOST_WIDE_INT *) op2, op2len,
             half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT) u[i] * (unsigned HOST_WIDE_INT) v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (full || high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (op1[op1len - 1] < 0)
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT) r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT) v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (op2[op2len - 1] < 0)
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT) r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT) u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[half_blocks_needed - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT) (r[i] & mask)) != top)
          *overflow = true;
    }

  if (full)
    {
      /* compute [2prec] <- [prec] * [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, 2 * half_blocks_needed);
      return canonize (val, blocks_needed * 2, prec * 2);
    }
  else if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
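
/* The main loop above is classic grade-school multiplication in base
   2^HOST_BITS_PER_HALF_WIDE_INT.  Working in half-blocks keeps every
   step inside a HOST_WIDE_INT: with 32-bit digits the worst case
   (2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1) equals 2^64 - 1,
   so the digit product plus the partial sum plus the carry can never
   overflow a 64-bit accumulator.  */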

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  if (x.precision == 0)
    return 0;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}

/*
 * Division and mod.
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of B_DIVIDEND
   (Knuth's U); there needs to be at least one extra element of
   B_DIVIDEND allocated.  N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   unsigned int m, unsigned int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT, which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT s, i, j, t, k;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j]) / b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT) b_quotient[j]
                  * (unsigned HOST_WIDE_INT) b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT) b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
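
/* A note on the normalization step above: the divisor is shifted left
   by S so that the high bit of its top half-block is set, and the same
   shift is applied to the dividend (whose extra element absorbs the
   spill).  Knuth's analysis relies on this to keep the estimate QHAT
   at most 2 too large, so the "add back" correction runs rarely; the
   shift is undone when the remainder is extracted.  */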

/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder, const HOST_WIDE_INT *dividend,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  HOST_WIDE_INT u0[WIDE_INT_MAX_ELTS];
  HOST_WIDE_INT u1[WIDE_INT_MAX_ELTS];
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;

  if (divisor[0] == 0 && divisor_len == 1)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor_len == 1)
    {
      HOST_WIDE_INT divisor_low = divisor[0];
      if (divisor_prec < HOST_BITS_PER_WIDE_INT)
        divisor_low = sext_hwi (divisor_low, divisor_prec);
      unsigned HOST_WIDE_INT dividend_high = dividend[dividend_len - 1];
      dividend_high <<= -dividend_prec % HOST_BITS_PER_WIDE_INT;
      if (divisor_low == -1
          && (HOST_WIDE_INT) dividend_high == HOST_WIDE_INT_MIN)
        {
          /* The smallest neg number is 100...00.  The high word was
             checked above, now check that the rest of the words are zero.  */
          unsigned int i;
          bool all_zero = true;
          for (i = 0; i + 1 < dividend_len; i++)
            if (dividend[i] != 0)
              {
                all_zero = false;
                break;
              }
          if (all_zero)
            overflow = true;
        }
    }

  /* If overflow is set, just get out.  There will only be grief by
     continuing.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        quotient[0] = 0;
      return 1;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (dividend_prec <= HOST_BITS_PER_WIDE_INT
      && divisor_prec <= HOST_BITS_PER_WIDE_INT)
    {
      if (sgn == SIGNED)
        {
          HOST_WIDE_INT o0 = sext_hwi (dividend[0], dividend_prec);
          HOST_WIDE_INT o1 = sext_hwi (divisor[0], divisor_prec);

          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
        }
      else
        {
          unsigned HOST_WIDE_INT o0 = zext_hwi (dividend[0], dividend_prec);
          unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec);

          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
        }

      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (top_bit_of (dividend, dividend_len, dividend_prec))
        {
          dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len,
                                        dividend_prec, UNSIGNED, 0);
          dividend = u0;
          dividend_neg = true;
        }
      if (top_bit_of (divisor, divisor_len, divisor_prec))
        {
          divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len,
                                       divisor_prec, UNSIGNED, 0);
          divisor = u1;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, (const unsigned HOST_WIDE_INT *) dividend,
             dividend_len, dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT *) divisor,
             divisor_len, divisor_blocks_needed, divisor_prec, sgn);

  if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED)
    m = dividend_blocks_needed;
  else
    m = 2 * dividend_len;

  if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED)
    n = divisor_blocks_needed;
  else
    n = 2 * divisor_len;

  /* We need to find the top non zero block of b_divisor.  At most the
     top two blocks are zero.  */
  if (b_divisor[n - 1] == 0)
    n--;
  if (b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}

/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
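
/* For illustration (assuming 64-bit blocks): shifting { 1 } left by 68
   at precision 128 gives skip = 1 and small_shift = 4, producing
   val = { 0, 0x10 }, i.e. 2^68.  */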

/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}

/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero.  */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}

/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}

/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Check whether the value is zero.  */
  if (high == 0 && x.len == 1)
    return clz_zero (x.precision);

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}

/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}

/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return ctz_zero (x.precision);

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}

/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* 0-precision values can only hold 0.  */
  if (x.precision == 0)
    return -1;

  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
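
/* For illustration (assuming 64-bit blocks): for the 128-bit value
   { 0, 0x10 }, i.e. 2^68, CRUX is 1, all lower blocks are zero, and the
   result is 64 + 4 == 68.  */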

/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == (int) precision;
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}

/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

/*
 * Private debug printing routines.
 */
#ifdef DEBUG_WIDE_INT
/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
static char *
dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf)
{
  int i;
  unsigned int l;
  const char *sep = "";

  l = sprintf (buf, "[%d (", prec);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}
#endif

#if 0
/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
char *
wide_int_ro::dump (char *buf) const
{
  int i;
  unsigned int l;
  const char *sep = "";

  l = sprintf (buf, "[%d (", precision);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}
#endif