gcc/wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2013 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "wide-int.h"
27 #include "tree.h"
28 #include "dumpfile.h"
30 #if GCC_VERSION >= 3000
31 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
32 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
33 typedef unsigned HOST_WIDE_INT UWtype;
34 typedef unsigned int UQItype __attribute__ ((mode (QI)));
35 typedef unsigned int USItype __attribute__ ((mode (SI)));
36 typedef unsigned int UDItype __attribute__ ((mode (DI)));
37 #include "longlong.h"
38 #endif
40 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
43 * Internal utilities.
46 /* Quantities to deal with values that hold half of a wide int. Used
47 in multiply and divide. */
48 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
50 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
51 #define BLOCKS_NEEDED(PREC) \
52 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
53 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
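/* As a concrete illustration, assuming HOST_BITS_PER_WIDE_INT == 64:
   BLOCKS_NEEDED (1) == 1, BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2,
   and BLOCKS_NEEDED (0) == 1 (a zero precision still occupies one block).
   SIGN_MASK (X) is -1 when X is negative and 0 otherwise, i.e. the block
   value that an implicit sign extension of X would supply.  */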
55 /* Return the value VAL[I] if I < LEN; otherwise return 0 or -1,
56 based on the top existing bit of VAL. */
58 static unsigned HOST_WIDE_INT
59 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
61 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
64 /* Convert the integer in VAL to canonical form, returning its new length.
65 LEN is the number of blocks currently in VAL and PRECISION is the number
66 of bits in the integer it represents.
68 This function only changes the representation, not the value. */
69 static unsigned int
70 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
72 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
73 HOST_WIDE_INT top;
74 int i;
76 if (len > blocks_needed)
77 len = blocks_needed;
79 if (len == 1)
80 return len;
82 top = val[len - 1];
83 if (len * HOST_BITS_PER_WIDE_INT > precision)
84 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
85 if (top != 0 && top != (HOST_WIDE_INT)-1)
86 return len;
88 /* At this point we know that the top is either 0 or -1. Find the
89 first block that is not a copy of this. */
90 for (i = len - 2; i >= 0; i--)
92 HOST_WIDE_INT x = val[i];
93 if (x != top)
95 if (SIGN_MASK (x) == top)
96 return i + 1;
98 /* We need an extra block because the top bit of block I does
99 not match the extension. */
100 return i + 2;
104 /* The number is 0 or -1. */
105 return 1;
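/* A worked example, assuming 64-bit blocks: the value -1 at precision 128
   may arrive as the two blocks { -1, -1 }; every block above the lowest is
   just the sign extension of the block below it, so canonize returns
   length 1.  Similarly { 5, 0 } at precision 128 canonizes to length 1,
   because block 0 has a clear top bit and therefore already implies the
   zero block above it.  */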
109 * Conversion routines in and out of wide_int.
112 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
113 result for an integer with precision PRECISION. Return the length
114 of VAL (after any canonization). */
115 unsigned int
116 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
117 unsigned int xlen, unsigned int precision, bool need_canon)
119 for (unsigned i = 0; i < xlen; i++)
120 val[i] = xval[i];
121 return need_canon ? canonize (val, xlen, precision) : xlen;
124 /* Construct a wide int from a buffer of length LEN. BUFFER will be
125 read according to the byte endianness and word endianness of the target.
126 Only the lower BUFFER_LEN bytes of the result are set; the remaining
127 high bytes are cleared. */
128 wide_int
129 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
131 unsigned int precision = buffer_len * BITS_PER_UNIT;
132 wide_int result = wide_int::create (precision);
133 unsigned int words = buffer_len / UNITS_PER_WORD;
135 /* We have to clear all the bits ourselves, as we merely OR in values
136 below. */
137 unsigned int len = BLOCKS_NEEDED (precision);
138 HOST_WIDE_INT *val = result.write_val ();
139 for (unsigned int i = 0; i < len; ++i)
140 val[i] = 0;
142 for (unsigned int byte = 0; byte < buffer_len; byte++)
144 unsigned int offset;
145 unsigned int index;
146 unsigned int bitpos = byte * BITS_PER_UNIT;
147 unsigned HOST_WIDE_INT value;
149 if (buffer_len > UNITS_PER_WORD)
151 unsigned int word = byte / UNITS_PER_WORD;
153 if (WORDS_BIG_ENDIAN)
154 word = (words - 1) - word;
156 offset = word * UNITS_PER_WORD;
158 if (BYTES_BIG_ENDIAN)
159 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
160 else
161 offset += byte % UNITS_PER_WORD;
163 else
164 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
166 value = (unsigned HOST_WIDE_INT) buffer[offset];
168 index = bitpos / HOST_BITS_PER_WIDE_INT;
169 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
172 result.set_len (canonize (val, len, precision));
174 return result;
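/* Sketch of a call, assuming a little-endian target: the two-byte buffer
   { 0x01, 0x02 } produces a wide_int of precision 16 with value 0x0201,
   byte 0 supplying bits 0-7 and byte 1 supplying bits 8-15.  On a
   big-endian target the bytes are read in the opposite order.  */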
177 /* Sets RESULT from X, the sign is taken according to SGN. */
178 void
179 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
181 int len = x.get_len ();
182 const HOST_WIDE_INT *v = x.get_val ();
183 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
185 if (wi::neg_p (x, sgn))
187 /* We use ones' complement to avoid the -0x80..0 edge case that
188 negation won't work on. */
189 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
190 for (int i = 0; i < len; i++)
191 t[i] = ~v[i];
192 if (excess > 0)
193 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
194 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
195 mpz_com (result, result);
197 else if (excess > 0)
199 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
200 for (int i = 0; i < len - 1; i++)
201 t[i] = v[i];
202 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
203 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
205 else
206 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
209 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
210 values of X will be wrapped; otherwise, they will be set to the
211 appropriate minimum or maximum TYPE bound. */
212 wide_int
213 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
215 size_t count, numb;
216 int prec = TYPE_PRECISION (type);
217 wide_int res = wide_int::create (prec);
219 if (!wrap)
221 mpz_t min, max;
223 mpz_init (min);
224 mpz_init (max);
225 get_type_static_bounds (type, min, max);
227 if (mpz_cmp (x, min) < 0)
228 mpz_set (x, min);
229 else if (mpz_cmp (x, max) > 0)
230 mpz_set (x, max);
232 mpz_clear (min);
233 mpz_clear (max);
236 /* Determine the number of unsigned HOST_WIDE_INTs that are required
237 for representing the value. The code to calculate count is
238 extracted from the GMP manual, section "Integer Import and Export":
239 http://gmplib.org/manual/Integer-Import-and-Export.html */
240 numb = 8 * sizeof(HOST_WIDE_INT);
241 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
242 HOST_WIDE_INT *val = res.write_val ();
243 mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
244 if (count < 1)
246 val[0] = 0;
247 count = 1;
249 res.set_len (count);
251 if (mpz_sgn (x) < 0)
252 res = -res;
254 return res;
258 * Largest and smallest values in a mode.
261 /* Return the largest SGNed number that is representable in PRECISION bits.
263 TODO: There is still code from the double_int era that tries to
264 make up for the fact that double_ints could not represent the
265 min and max values of all types. This code should be removed
266 because the min and max values can always be represented in
267 wide_ints and int-csts. */
268 wide_int
269 wi::max_value (unsigned int precision, signop sgn)
271 gcc_checking_assert (precision != 0);
272 if (sgn == UNSIGNED)
273 /* The unsigned max is just all ones. */
274 return shwi (-1, precision);
275 else
276 /* The signed max is all ones except the top bit. This must be
277 explicitly represented. */
278 return mask (precision - 1, false, precision);
281 /* Return the smallest SGNed number that is representable in PRECISION bits. */
282 wide_int
283 wi::min_value (unsigned int precision, signop sgn)
285 gcc_checking_assert (precision != 0);
286 if (sgn == UNSIGNED)
287 return uhwi (0, precision);
288 else
289 /* The signed min is all zeros except the top bit. This must be
290 explicitly represented. */
291 return wi::set_bit_in_zero (precision - 1, precision);
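/* For example, at precision 8: max_value (8, UNSIGNED) is 0xff,
   max_value (8, SIGNED) is 0x7f (127), min_value (8, UNSIGNED) is 0,
   and min_value (8, SIGNED) is 0x80 (-128).  */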
295 * Public utilities.
298 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
299 signedness SGN, to an integer that has PRECISION bits. Store the blocks
300 in VAL and return the number of blocks used.
302 This function can handle both extension (PRECISION > XPRECISION)
303 and truncation (PRECISION < XPRECISION). */
304 unsigned int
305 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
306 unsigned int xlen, unsigned int xprecision,
307 unsigned int precision, signop sgn)
309 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
310 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
311 for (unsigned i = 0; i < len; i++)
312 val[i] = xval[i];
314 if (precision > xprecision)
316 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
318 /* Expanding. */
319 if (sgn == UNSIGNED)
321 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
322 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
323 else if (val[len - 1] < 0)
325 while (len < BLOCKS_NEEDED (xprecision))
326 val[len++] = -1;
327 if (small_xprecision)
328 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
329 else
330 val[len++] = 0;
333 else
335 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
336 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
339 len = canonize (val, len, precision);
341 return len;
344 /* This function hides the fact that we cannot rely on the bits beyond
345 the precision. This issue comes up in the relational comparisons
346 where we do allow comparisons of values of different precisions. */
347 static inline HOST_WIDE_INT
348 selt (const HOST_WIDE_INT *a, unsigned int len,
349 unsigned int blocks_needed, unsigned int small_prec,
350 unsigned int index, signop sgn)
352 HOST_WIDE_INT val;
353 if (index < len)
354 val = a[index];
355 else if (index < blocks_needed || sgn == SIGNED)
356 /* Signed or within the precision. */
357 val = SIGN_MASK (a[len - 1]);
358 else
359 /* Unsigned extension beyond the precision. */
360 val = 0;
362 if (small_prec && index == blocks_needed - 1)
363 return (sgn == SIGNED
364 ? sext_hwi (val, small_prec)
365 : zext_hwi (val, small_prec));
366 else
367 return val;
370 /* Find the highest bit represented in a wide int. This will in
371 general have the same value as the sign bit. */
372 static inline HOST_WIDE_INT
373 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
375 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
376 unsigned HOST_WIDE_INT val = a[len - 1];
377 if (excess > 0)
378 val <<= excess;
379 return val >> (HOST_BITS_PER_WIDE_INT - 1);
383 * Comparisons. Note that only equality is an operator. The other
384 * comparisons cannot be operators, since they are inherently signed or
385 * unsigned and C++ has no such operators.
388 /* Return true if OP0 == OP1. */
389 bool
390 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
391 const HOST_WIDE_INT *op1, unsigned int op1len,
392 unsigned int prec)
394 int l0 = op0len - 1;
395 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
397 if (op0len != op1len)
398 return false;
400 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
402 /* It does not matter if we zext or sext here, we just have to
403 do both the same way. */
404 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
405 return false;
406 l0--;
409 while (l0 >= 0)
410 if (op0[l0] != op1[l0])
411 return false;
412 else
413 l0--;
415 return true;
418 /* Return true if OP0 < OP1 using signed comparisons. */
419 bool
420 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
421 unsigned int precision,
422 const HOST_WIDE_INT *op1, unsigned int op1len)
424 HOST_WIDE_INT s0, s1;
425 unsigned HOST_WIDE_INT u0, u1;
426 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
427 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
428 int l = MAX (op0len - 1, op1len - 1);
430 /* Only the top block is compared as signed. The rest are unsigned
431 comparisons. */
432 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
433 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
434 if (s0 < s1)
435 return true;
436 if (s0 > s1)
437 return false;
439 l--;
440 while (l >= 0)
442 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
443 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
445 if (u0 < u1)
446 return true;
447 if (u0 > u1)
448 return false;
449 l--;
452 return false;
455 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
456 signed compares. */
458 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
459 unsigned int precision,
460 const HOST_WIDE_INT *op1, unsigned int op1len)
462 HOST_WIDE_INT s0, s1;
463 unsigned HOST_WIDE_INT u0, u1;
464 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
465 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
466 int l = MAX (op0len - 1, op1len - 1);
468 /* Only the top block is compared as signed. The rest are unsigned
469 comparisons. */
470 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
471 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
472 if (s0 < s1)
473 return -1;
474 if (s0 > s1)
475 return 1;
477 l--;
478 while (l >= 0)
480 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
481 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
483 if (u0 < u1)
484 return -1;
485 if (u0 > u1)
486 return 1;
487 l--;
490 return 0;
493 /* Return true if OP0 < OP1 using unsigned comparisons. */
494 bool
495 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
496 unsigned int precision,
497 const HOST_WIDE_INT *op1, unsigned int op1len)
499 unsigned HOST_WIDE_INT x0;
500 unsigned HOST_WIDE_INT x1;
501 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
502 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
503 int l = MAX (op0len - 1, op1len - 1);
505 while (l >= 0)
507 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
508 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
509 if (x0 < x1)
510 return true;
511 if (x0 > x1)
512 return false;
513 l--;
516 return false;
519 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
520 unsigned compares. */
522 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
523 unsigned int precision,
524 const HOST_WIDE_INT *op1, unsigned int op1len)
526 unsigned HOST_WIDE_INT x0;
527 unsigned HOST_WIDE_INT x1;
528 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
529 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
530 int l = MAX (op0len - 1, op1len - 1);
532 while (l >= 0)
534 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
535 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
536 if (x0 < x1)
537 return -1;
538 if (x0 > x1)
539 return 1;
540 l--;
543 return 0;
547 * Extension.
550 /* Sign-extend the number represented by XVAL and XLEN into VAL,
551 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
552 and VAL have PRECISION bits. */
553 unsigned int
554 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
555 unsigned int xlen, unsigned int precision, unsigned int offset)
557 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
558 /* Extending beyond the precision is a no-op. If we have only stored
559 OFFSET bits or fewer, the rest are already signs. */
560 if (offset >= precision || len >= xlen)
562 for (unsigned i = 0; i < xlen; ++i)
563 val[i] = xval[i];
564 return xlen;
566 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
567 for (unsigned int i = 0; i < len; i++)
568 val[i] = xval[i];
569 if (suboffset > 0)
571 val[len] = sext_hwi (xval[len], suboffset);
572 len += 1;
574 return canonize (val, len, precision);
577 /* Zero-extend the number represented by XVAL and XLEN into VAL,
578 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
579 and VAL have PRECISION bits. */
580 unsigned int
581 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
582 unsigned int xlen, unsigned int precision, unsigned int offset)
584 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
585 /* Extending beyond the precision is a no-op. If we have only stored
586 OFFSET bits or fewer, and the upper stored bit is zero, then there
587 is nothing to do. */
588 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
590 for (unsigned i = 0; i < xlen; ++i)
591 val[i] = xval[i];
592 return xlen;
594 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
595 for (unsigned int i = 0; i < len; i++)
596 val[i] = i < xlen ? xval[i] : -1;
597 if (suboffset > 0)
598 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
599 else
600 val[len] = 0;
601 return canonize (val, len + 1, precision);
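/* As an example of the two extensions: starting from the all-ones value
   (-1) at precision 32 with OFFSET == 8, sign extension leaves the value
   unchanged, while zero extension keeps only the low 8 bits and yields
   0xff.  */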
605 * Masking, inserting, shifting, rotating.
608 /* Insert WIDTH bits from Y into X starting at START. */
609 wide_int
610 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
611 unsigned int width)
613 wide_int result;
614 wide_int mask;
615 wide_int tmp;
617 unsigned int precision = x.get_precision ();
618 if (start >= precision)
619 return x;
621 gcc_checking_assert (precision >= width);
623 if (start + width >= precision)
624 width = precision - start;
626 mask = wi::shifted_mask (start, width, false, precision);
627 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
628 result = tmp & mask;
630 tmp = wi::bit_and_not (x, mask);
631 result = result | tmp;
633 return result;
636 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
637 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
638 bits. */
639 unsigned int
640 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
641 unsigned int xlen, unsigned int precision, unsigned int bit)
643 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
644 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
646 if (block + 1 >= xlen)
648 /* The operation either affects the last current block or needs
649 a new block. */
650 unsigned int len = block + 1;
651 for (unsigned int i = 0; i < len; i++)
652 val[i] = safe_uhwi (xval, xlen, i);
653 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
655 /* If the bit we just set is at the msb of the block, make sure
656 that any higher bits are zeros. */
657 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
658 val[len++] = 0;
659 return len;
661 else
663 for (unsigned int i = 0; i < xlen; i++)
664 val[i] = xval[i];
665 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
666 return canonize (val, xlen, precision);
670 /* bswap THIS. */
671 wide_int
672 wide_int_storage::bswap () const
674 wide_int result = wide_int::create (precision);
675 unsigned int i, s;
676 unsigned int len = BLOCKS_NEEDED (precision);
677 unsigned int xlen = get_len ();
678 const HOST_WIDE_INT *xval = get_val ();
679 HOST_WIDE_INT *val = result.write_val ();
681 /* This is not a well defined operation if the precision is not a
682 multiple of 8. */
683 gcc_assert ((precision & 0x7) == 0);
685 for (i = 0; i < len; i++)
686 val[i] = 0;
688 /* Only swap the bytes that are not the padding. */
689 for (s = 0; s < precision; s += 8)
691 unsigned int d = precision - s - 8;
692 unsigned HOST_WIDE_INT byte;
694 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
695 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
697 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
699 block = d / HOST_BITS_PER_WIDE_INT;
700 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
702 val[block] |= byte << offset;
705 result.set_len (canonize (val, len, precision));
706 return result;
709 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
710 above that up to PREC are zeros. The result is inverted if NEGATE
711 is true. Return the number of blocks in VAL. */
712 unsigned int
713 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
714 unsigned int prec)
716 if (width >= prec)
718 val[0] = negate ? 0 : -1;
719 return 1;
721 else if (width == 0)
723 val[0] = negate ? -1 : 0;
724 return 1;
727 unsigned int i = 0;
728 while (i < width / HOST_BITS_PER_WIDE_INT)
729 val[i++] = negate ? 0 : -1;
731 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
732 if (shift != 0)
734 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
735 val[i++] = negate ? ~last : last;
737 else
738 val[i++] = negate ? -1 : 0;
740 return i;
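/* For instance, wi::mask (val, 4, false, 8) stores the single block 0x0f
   (the low four bits set); with NEGATE == true it stores ~0x0f, which at
   an 8-bit precision reads as 0xf0.  */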
743 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
744 bits are ones, and the bits above that up to PREC are zeros. The result
745 is inverted if NEGATE is true. Return the number of blocks in VAL. */
746 unsigned int
747 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
748 bool negate, unsigned int prec)
750 if (start >= prec || width == 0)
752 val[0] = negate ? -1 : 0;
753 return 1;
756 if (width > prec - start)
757 width = prec - start;
758 unsigned int end = start + width;
760 unsigned int i = 0;
761 while (i < start / HOST_BITS_PER_WIDE_INT)
762 val[i++] = negate ? -1 : 0;
764 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
765 if (shift)
767 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
768 shift += width;
769 if (shift < HOST_BITS_PER_WIDE_INT)
771 /* case 000111000 */
772 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
773 val[i++] = negate ? ~block : block;
774 return i;
776 else
777 /* ...111000 */
778 val[i++] = negate ? block : ~block;
781 while (i < end / HOST_BITS_PER_WIDE_INT)
782 /* 1111111 */
783 val[i++] = negate ? 0 : -1;
785 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
786 if (shift != 0)
788 /* 000011111 */
789 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
790 val[i++] = negate ? ~block : block;
792 else if (end < prec)
793 val[i++] = negate ? -1 : 0;
795 return i;
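/* For instance, wi::shifted_mask (val, 2, 3, false, 8) stores the single
   block 0x1c (bits 2, 3 and 4 set); with NEGATE == true the stored block
   is ~0x1c, which reads as 0xe3 at an 8-bit precision.  */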
799 * Logical operations.
802 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
803 unsigned int
804 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
805 unsigned int op0len, const HOST_WIDE_INT *op1,
806 unsigned int op1len, unsigned int prec)
808 int l0 = op0len - 1;
809 int l1 = op1len - 1;
810 bool need_canon = true;
812 unsigned int len = MAX (op0len, op1len);
813 if (l0 > l1)
815 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
816 if (op1mask == 0)
818 l0 = l1;
819 len = l1 + 1;
821 else
823 need_canon = false;
824 while (l0 > l1)
826 val[l0] = op0[l0];
827 l0--;
831 else if (l1 > l0)
833 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
834 if (op0mask == 0)
835 len = l0 + 1;
836 else
838 need_canon = false;
839 while (l1 > l0)
841 val[l1] = op1[l1];
842 l1--;
847 while (l0 >= 0)
849 val[l0] = op0[l0] & op1[l0];
850 l0--;
853 if (need_canon)
854 len = canonize (val, len, prec);
856 return len;
859 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
860 unsigned int
861 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
862 unsigned int op0len, const HOST_WIDE_INT *op1,
863 unsigned int op1len, unsigned int prec)
865 wide_int result;
866 int l0 = op0len - 1;
867 int l1 = op1len - 1;
868 bool need_canon = true;
870 unsigned int len = MAX (op0len, op1len);
871 if (l0 > l1)
873 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
874 if (op1mask != 0)
876 l0 = l1;
877 len = l1 + 1;
879 else
881 need_canon = false;
882 while (l0 > l1)
884 val[l0] = op0[l0];
885 l0--;
889 else if (l1 > l0)
891 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
892 if (op0mask == 0)
893 len = l0 + 1;
894 else
896 need_canon = false;
897 while (l1 > l0)
899 val[l1] = ~op1[l1];
900 l1--;
905 while (l0 >= 0)
907 val[l0] = op0[l0] & ~op1[l0];
908 l0--;
911 if (need_canon)
912 len = canonize (val, len, prec);
914 return len;
917 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
918 unsigned int
919 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
920 unsigned int op0len, const HOST_WIDE_INT *op1,
921 unsigned int op1len, unsigned int prec)
923 wide_int result;
924 int l0 = op0len - 1;
925 int l1 = op1len - 1;
926 bool need_canon = true;
928 unsigned int len = MAX (op0len, op1len);
929 if (l0 > l1)
931 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
932 if (op1mask != 0)
934 l0 = l1;
935 len = l1 + 1;
937 else
939 need_canon = false;
940 while (l0 > l1)
942 val[l0] = op0[l0];
943 l0--;
947 else if (l1 > l0)
949 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
950 if (op0mask != 0)
951 len = l0 + 1;
952 else
954 need_canon = false;
955 while (l1 > l0)
957 val[l1] = op1[l1];
958 l1--;
963 while (l0 >= 0)
965 val[l0] = op0[l0] | op1[l0];
966 l0--;
969 if (need_canon)
970 len = canonize (val, len, prec);
972 return len;
975 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
976 unsigned int
977 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
978 unsigned int op0len, const HOST_WIDE_INT *op1,
979 unsigned int op1len, unsigned int prec)
981 wide_int result;
982 int l0 = op0len - 1;
983 int l1 = op1len - 1;
984 bool need_canon = true;
986 unsigned int len = MAX (op0len, op1len);
987 if (l0 > l1)
989 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
990 if (op1mask == 0)
992 l0 = l1;
993 len = l1 + 1;
995 else
997 need_canon = false;
998 while (l0 > l1)
1000 val[l0] = op0[l0];
1001 l0--;
1005 else if (l1 > l0)
1007 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1008 if (op0mask != 0)
1009 len = l0 + 1;
1010 else
1012 need_canon = false;
1013 while (l1 > l0)
1015 val[l1] = ~op1[l1];
1016 l1--;
1021 while (l0 >= 0)
1023 val[l0] = op0[l0] | ~op1[l0];
1024 l0--;
1027 if (need_canon)
1028 len = canonize (val, len, prec);
1030 return len;
1033 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1034 unsigned int
1035 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1036 unsigned int op0len, const HOST_WIDE_INT *op1,
1037 unsigned int op1len, unsigned int prec)
1039 wide_int result;
1040 int l0 = op0len - 1;
1041 int l1 = op1len - 1;
1043 unsigned int len = MAX (op0len, op1len);
1044 if (l0 > l1)
1046 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1047 while (l0 > l1)
1049 val[l0] = op0[l0] ^ op1mask;
1050 l0--;
1054 if (l1 > l0)
1056 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1057 while (l1 > l0)
1059 val[l1] = op0mask ^ op1[l1];
1060 l1--;
1064 while (l0 >= 0)
1066 val[l0] = op0[l0] ^ op1[l0];
1067 l0--;
1070 return canonize (val, len, prec);
1074 * Math.
1077 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1078 whether the result overflows when OP0 and OP1 are treated as having
1079 signedness SGN. Return the number of blocks in VAL. */
1080 unsigned int
1081 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1082 unsigned int op0len, const HOST_WIDE_INT *op1,
1083 unsigned int op1len, unsigned int prec,
1084 signop sgn, bool *overflow)
1086 unsigned HOST_WIDE_INT o0 = 0;
1087 unsigned HOST_WIDE_INT o1 = 0;
1088 unsigned HOST_WIDE_INT x = 0;
1089 unsigned HOST_WIDE_INT carry = 0;
1090 unsigned HOST_WIDE_INT old_carry = 0;
1091 unsigned HOST_WIDE_INT mask0, mask1;
1092 unsigned int i;
1094 unsigned int len = MAX (op0len, op1len);
1095 mask0 = -top_bit_of (op0, op0len, prec);
1096 mask1 = -top_bit_of (op1, op1len, prec);
1097 /* Add all of the explicitly defined elements. */
1099 for (i = 0; i < len; i++)
1101 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1102 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1103 x = o0 + o1 + carry;
1104 val[i] = x;
1105 old_carry = carry;
1106 carry = carry == 0 ? x < o0 : x <= o0;
1109 if (len * HOST_BITS_PER_WIDE_INT < prec)
1111 val[len] = mask0 + mask1 + carry;
1112 len++;
1113 if (overflow)
1114 *overflow = false;
1116 else if (overflow)
1118 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1119 if (sgn == SIGNED)
1121 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1122 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1124 else
1126 /* Put the MSBs of X and O0 in the top of the HWI. */
1127 x <<= shift;
1128 o0 <<= shift;
1129 if (old_carry)
1130 *overflow = (x <= o0);
1131 else
1132 *overflow = (x < o0);
1136 return canonize (val, len, prec);
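/* Overflow example at precision 8: adding the blocks { 0x7f } and { 0x01 }
   produces { 0x80 }.  With SGN == SIGNED this is 127 + 1 wrapping to -128,
   so *OVERFLOW is set; with SGN == UNSIGNED it is 127 + 1 == 128, which
   still fits in 8 bits, so *OVERFLOW remains false.  */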
1139 /* Subroutines of the multiplication and division operations. Unpack
1140 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1141 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1142 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1143 static void
1144 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1145 unsigned int in_len, unsigned int out_len,
1146 unsigned int prec, signop sgn)
1148 unsigned int i;
1149 unsigned int j = 0;
1150 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1151 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1152 HOST_WIDE_INT mask;
1154 if (sgn == SIGNED)
1156 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1157 mask &= HALF_INT_MASK;
1159 else
1160 mask = 0;
1162 for (i = 0; i < blocks_needed - 1; i++)
1164 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1165 result[j++] = x;
1166 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1169 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1170 if (small_prec)
1172 if (sgn == SIGNED)
1173 x = sext_hwi (x, small_prec);
1174 else
1175 x = zext_hwi (x, small_prec);
1177 result[j++] = x;
1178 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1180 /* Smear the sign bit. */
1181 while (j < out_len)
1182 result[j++] = mask;
1185 /* The inverse of wi_unpack. IN_LEN is the number of input
1186 blocks. The number of output blocks will be half this amount. */
1187 static void
1188 wi_pack (unsigned HOST_WIDE_INT *result,
1189 const unsigned HOST_HALF_WIDE_INT *input,
1190 unsigned int in_len)
1192 unsigned int i = 0;
1193 unsigned int j = 0;
1195 while (i + 2 < in_len)
1197 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1198 | ((unsigned HOST_WIDE_INT)input[i + 1]
1199 << HOST_BITS_PER_HALF_WIDE_INT);
1200 i += 2;
1203 /* Handle the case where in_len is odd. For this we zero extend. */
1204 if (in_len & 1)
1205 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1206 else
1207 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1208 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
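/* Assuming 64-bit HOST_WIDE_INTs and 32-bit half blocks, wi_unpack splits
   the block 0x1122334455667788 into the half blocks { 0x55667788,
   0x11223344 } (low half first), and wi_pack reassembles them into the
   original block.  The sign/zero smear in wi_unpack only matters when
   OUT_LEN asks for more halves than the input supplies.  */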
1211 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1212 result is returned.
1214 If HIGH is not set, throw away the upper half after the check is
1215 made to see if it overflows. Unfortunately there is no better way
1216 to check for overflow than to do this. If OVERFLOW is nonnull,
1217 record in *OVERFLOW whether the result overflowed. SGN controls
1218 the signedness and is used to check overflow or if HIGH is set. */
1219 unsigned int
1220 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1221 unsigned int op1len, const HOST_WIDE_INT *op2val,
1222 unsigned int op2len, unsigned int prec, signop sgn,
1223 bool *overflow, bool high)
1225 unsigned HOST_WIDE_INT o0, o1, k, t;
1226 unsigned int i;
1227 unsigned int j;
1228 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1229 unsigned int half_blocks_needed = blocks_needed * 2;
1230 /* The sizes here are scaled to support a 2x largest mode by 2x
1231 largest mode yielding a 4x largest mode result. This is what is
1232 needed by vpn. */
1234 unsigned HOST_HALF_WIDE_INT
1235 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1236 unsigned HOST_HALF_WIDE_INT
1237 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1238 /* The '2' in 'R' is because we are internally doing a full
1239 multiply. */
1240 unsigned HOST_HALF_WIDE_INT
1241 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1242 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1244 /* If the top level routine did not really pass in an overflow, then
1245 just make sure that we never attempt to set it. */
1246 bool needs_overflow = (overflow != 0);
1247 if (needs_overflow)
1248 *overflow = false;
1250 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1251 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1253 /* This is a surprisingly common case, so do it first. */
1254 if (op1 == 0 || op2 == 0)
1256 val[0] = 0;
1257 return 1;
1260 #ifdef umul_ppmm
1261 if (sgn == UNSIGNED)
1263 /* If the inputs are single HWIs and the output has room for at
1264 least two HWIs, we can use umul_ppmm directly. */
1265 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1266 && wi::fits_uhwi_p (op1)
1267 && wi::fits_uhwi_p (op2))
1269 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1270 return 1 + (val[1] != 0 || val[0] < 0);
1272 /* Likewise if the output is a full single HWI, except that the
1273 upper HWI of the result is only used for determining overflow.
1274 (We handle this case inline when overflow isn't needed.) */
1275 else if (prec == HOST_BITS_PER_WIDE_INT)
1277 unsigned HOST_WIDE_INT upper;
1278 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1279 if (needs_overflow)
1280 *overflow = (upper != 0);
1281 return 1;
1284 #endif
1286 /* Handle multiplications by 1. */
1287 if (op1 == 1)
1289 for (i = 0; i < op2len; i++)
1290 val[i] = op2val[i];
1291 return op2len;
1293 if (op2 == 1)
1295 for (i = 0; i < op1len; i++)
1296 val[i] = op1val[i];
1297 return op1len;
1300 /* If we need to check for overflow, we can only do half wide
1301 multiplies quickly because we need to look at the top bits to
1302 check for the overflow. */
1303 if ((high || needs_overflow)
1304 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1306 unsigned HOST_WIDE_INT r;
1308 if (sgn == SIGNED)
1310 o0 = op1.to_shwi ();
1311 o1 = op2.to_shwi ();
1313 else
1315 o0 = op1.to_uhwi ();
1316 o1 = op2.to_uhwi ();
1319 r = o0 * o1;
1320 if (needs_overflow)
1322 if (sgn == SIGNED)
1324 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1325 *overflow = true;
1327 else
1329 if ((r >> prec) != 0)
1330 *overflow = true;
1333 val[0] = high ? r >> prec : r;
1334 return 1;
1337 /* We do unsigned mul and then correct it. */
1338 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1339 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1341 /* The 2 is for a full mult. */
1342 memset (r, 0, half_blocks_needed * 2
1343 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1345 for (j = 0; j < half_blocks_needed; j++)
1347 k = 0;
1348 for (i = 0; i < half_blocks_needed; i++)
1350 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1351 + r[i + j] + k);
1352 r[i + j] = t & HALF_INT_MASK;
1353 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1355 r[j + half_blocks_needed] = k;
1358 /* We did unsigned math above. For signed we must adjust the
1359 product (assuming we need to see that). */
1360 if (sgn == SIGNED && (high || needs_overflow))
1362 unsigned HOST_WIDE_INT b;
1363 if (wi::neg_p (op1))
1365 b = 0;
1366 for (i = 0; i < half_blocks_needed; i++)
1368 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1369 - (unsigned HOST_WIDE_INT)v[i] - b;
1370 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1371 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1374 if (wi::neg_p (op2))
1376 b = 0;
1377 for (i = 0; i < half_blocks_needed; i++)
1379 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1380 - (unsigned HOST_WIDE_INT)u[i] - b;
1381 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1382 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1387 if (needs_overflow)
1389 HOST_WIDE_INT top;
1391 /* For unsigned, overflow is true if any of the top bits are set.
1392 For signed, overflow is true if any of the top bits are not equal
1393 to the sign bit. */
1394 if (sgn == UNSIGNED)
1395 top = 0;
1396 else
1398 top = r[(half_blocks_needed) - 1];
1399 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1400 top &= mask;
1403 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1404 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1405 *overflow = true;
1408 if (high)
1410 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1411 wi_pack ((unsigned HOST_WIDE_INT *) val,
1412 &r[half_blocks_needed], half_blocks_needed);
1413 return canonize (val, blocks_needed, prec);
1415 else
1417 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1418 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1419 return canonize (val, blocks_needed, prec);
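/* A small overflow example: multiplying 16 by 16 at precision 8 cannot be
   represented (the full product is 256), so when OVERFLOW is supplied it
   is set for both signednesses; a product this narrow is handled by the
   half-wide fast path above rather than the long multiplication.  */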
1423 /* Compute the population count of X. */
1425 wi::popcount (const wide_int_ref &x)
1427 unsigned int i;
1428 int count;
1430 /* The high order block is special if it is the last block and the
1431 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1432 have to clear out any ones above the precision before doing
1433 popcount on this block. */
1434 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1435 unsigned int stop = x.len;
1436 if (count < 0)
1438 count = popcount_hwi (x.uhigh () << -count);
1439 stop -= 1;
1441 else
1443 if (x.sign_mask () >= 0)
1444 count = 0;
1447 for (i = 0; i < stop; ++i)
1448 count += popcount_hwi (x.val[i]);
1450 return count;
1453 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1454 whether the result overflows when OP0 and OP1 are treated as having
1455 signedness SGN. Return the number of blocks in VAL. */
1456 unsigned int
1457 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1458 unsigned int op0len, const HOST_WIDE_INT *op1,
1459 unsigned int op1len, unsigned int prec,
1460 signop sgn, bool *overflow)
1462 unsigned HOST_WIDE_INT o0 = 0;
1463 unsigned HOST_WIDE_INT o1 = 0;
1464 unsigned HOST_WIDE_INT x = 0;
1465 /* We implement subtraction as an in place negate and add. Negation
1466 is just inversion and add 1, so we can do the add of 1 by just
1467 starting the borrow in of the first element at 1. */
1468 unsigned HOST_WIDE_INT borrow = 0;
1469 unsigned HOST_WIDE_INT old_borrow = 0;
1471 unsigned HOST_WIDE_INT mask0, mask1;
1472 unsigned int i;
1474 unsigned int len = MAX (op0len, op1len);
1475 mask0 = -top_bit_of (op0, op0len, prec);
1476 mask1 = -top_bit_of (op1, op1len, prec);
1478 /* Subtract all of the explicitly defined elements. */
1479 for (i = 0; i < len; i++)
1481 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1482 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1483 x = o0 - o1 - borrow;
1484 val[i] = x;
1485 old_borrow = borrow;
1486 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1489 if (len * HOST_BITS_PER_WIDE_INT < prec)
1491 val[len] = mask0 - mask1 - borrow;
1492 len++;
1493 if (overflow)
1494 *overflow = false;
1496 else if (overflow)
1498 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1499 if (sgn == SIGNED)
1501 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1502 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1504 else
1506 /* Put the MSBs of X and O0 in the top of the HWI. */
1507 x <<= shift;
1508 o0 <<= shift;
1509 if (old_borrow)
1510 *overflow = (x >= o0);
1511 else
1512 *overflow = (x > o0);
1516 return canonize (val, len, prec);
1521 * Division and Mod
1524 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1525 algorithm is a small modification of the algorithm in Hacker's
1526 Delight by Warren, which itself is a small modification of Knuth's
1527 algorithm. M is the number of significant elements of U (the dividend);
1528 however, there needs to be at least one extra element of B_DIVIDEND
1529 allocated. N is the number of elements of B_DIVISOR. */
1530 static void
1531 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1532 unsigned HOST_HALF_WIDE_INT *b_remainder,
1533 unsigned HOST_HALF_WIDE_INT *b_dividend,
1534 unsigned HOST_HALF_WIDE_INT *b_divisor,
1535 int m, int n)
1537 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1538 HOST_WIDE_INT and stored in the lower bits of each word. This
1539 algorithm should work properly on both 32 and 64 bit
1540 machines. */
1541 unsigned HOST_WIDE_INT b
1542 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1543 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1544 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1545 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1546 HOST_WIDE_INT t, k;
1547 int i, j, s;
1549 /* Single digit divisor. */
1550 if (n == 1)
1552 k = 0;
1553 for (j = m - 1; j >= 0; j--)
1555 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1556 k = ((k * b + b_dividend[j])
1557 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1558 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1560 b_remainder[0] = k;
1561 return;
1564 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1566 if (s)
1568 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1569 algorithm, we can overwrite b_dividend and b_divisor, so we do
1570 that. */
1571 for (i = n - 1; i > 0; i--)
1572 b_divisor[i] = (b_divisor[i] << s)
1573 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1574 b_divisor[0] = b_divisor[0] << s;
1576 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1577 for (i = m - 1; i > 0; i--)
1578 b_dividend[i] = (b_dividend[i] << s)
1579 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1580 b_dividend[0] = b_dividend[0] << s;
1583 /* Main loop. */
1584 for (j = m - n; j >= 0; j--)
1586 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1587 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1588 again:
1589 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1591 qhat -= 1;
1592 rhat += b_divisor[n-1];
1593 if (rhat < b)
1594 goto again;
1597 /* Multiply and subtract. */
1598 k = 0;
1599 for (i = 0; i < n; i++)
1601 p = qhat * b_divisor[i];
1602 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1603 b_dividend[i + j] = t;
1604 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1605 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1607 t = b_dividend[j+n] - k;
1608 b_dividend[j+n] = t;
1610 b_quotient[j] = qhat;
1611 if (t < 0)
1613 b_quotient[j] -= 1;
1614 k = 0;
1615 for (i = 0; i < n; i++)
1617 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1618 b_dividend[i+j] = t;
1619 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1621 b_dividend[j+n] += k;
1624 if (s)
1625 for (i = 0; i < n; i++)
1626 b_remainder[i] = (b_dividend[i] >> s)
1627 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1628 else
1629 for (i = 0; i < n; i++)
1630 b_remainder[i] = b_dividend[i];
1634 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1635 the result. If QUOTIENT is nonnull, store the value of the quotient
1636 there and return the number of blocks in it. The return value is
1637 not defined otherwise. If REMAINDER is nonnull, store the value
1638 of the remainder there and store the number of blocks in
1639 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1640 the division overflowed. */
1641 unsigned int
1642 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1643 HOST_WIDE_INT *remainder,
1644 const HOST_WIDE_INT *dividend_val,
1645 unsigned int dividend_len, unsigned int dividend_prec,
1646 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1647 unsigned int divisor_prec, signop sgn,
1648 bool *oflow)
1650 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1651 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1652 unsigned HOST_HALF_WIDE_INT
1653 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1654 unsigned HOST_HALF_WIDE_INT
1655 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1656 unsigned HOST_HALF_WIDE_INT
1657 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1658 unsigned HOST_HALF_WIDE_INT
1659 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1660 unsigned int m, n;
1661 bool dividend_neg = false;
1662 bool divisor_neg = false;
1663 bool overflow = false;
1664 wide_int neg_dividend, neg_divisor;
1666 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1667 dividend_prec);
1668 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1669 divisor_prec);
1670 if (divisor == 0)
1671 overflow = true;
1673 /* The smallest signed number / -1 causes overflow. The dividend_len
1674 check is for speed rather than correctness. */
1675 if (sgn == SIGNED
1676 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1677 && divisor == -1
1678 && wi::only_sign_bit_p (dividend))
1679 overflow = true;
1681 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1682 (signed min / -1) has the same representation as the original dividend.
1683 We have traditionally made division by zero act as division by one,
1684 so there too we use the original dividend. */
1685 if (overflow)
1687 if (remainder)
1689 *remainder_len = 1;
1690 remainder[0] = 0;
1692 if (oflow != 0)
1693 *oflow = true;
1694 if (quotient)
1695 for (unsigned int i = 0; i < dividend_len; ++i)
1696 quotient[i] = dividend_val[i];
1697 return dividend_len;
1700 if (oflow)
1701 *oflow = false;
1703 /* Do it on the host if you can. */
1704 if (sgn == SIGNED
1705 && wi::fits_shwi_p (dividend)
1706 && wi::fits_shwi_p (divisor))
1708 HOST_WIDE_INT o0 = dividend.to_shwi ();
1709 HOST_WIDE_INT o1 = divisor.to_shwi ();
1711 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1713 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1714 if (quotient)
1716 quotient[0] = HOST_WIDE_INT_MIN;
1717 quotient[1] = 0;
1719 if (remainder)
1721 remainder[0] = 0;
1722 *remainder_len = 1;
1724 return 2;
1726 else
1728 if (quotient)
1729 quotient[0] = o0 / o1;
1730 if (remainder)
1732 remainder[0] = o0 % o1;
1733 *remainder_len = 1;
1735 return 1;
1739 if (sgn == UNSIGNED
1740 && wi::fits_uhwi_p (dividend)
1741 && wi::fits_uhwi_p (divisor))
1743 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1744 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1746 if (quotient)
1747 quotient[0] = o0 / o1;
1748 if (remainder)
1750 remainder[0] = o0 % o1;
1751 *remainder_len = 1;
1753 return 1;
1756 /* Make the divisor and dividend positive and remember what we
1757 did. */
1758 if (sgn == SIGNED)
1760 if (wi::neg_p (dividend))
1762 neg_dividend = -dividend;
1763 dividend = neg_dividend;
1764 dividend_neg = true;
1766 if (wi::neg_p (divisor))
1768 neg_divisor = -divisor;
1769 divisor = neg_divisor;
1770 divisor_neg = true;
1774 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1775 dividend_blocks_needed, dividend_prec, sgn);
1776 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1777 divisor_blocks_needed, divisor_prec, sgn);
1779 m = dividend_blocks_needed;
1780 while (m > 1 && b_dividend[m - 1] == 0)
1781 m--;
1783 n = divisor_blocks_needed;
1784 while (n > 1 && b_divisor[n - 1] == 0)
1785 n--;
1787 memset (b_quotient, 0, sizeof (b_quotient));
1789 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1791 unsigned int quotient_len = 0;
1792 if (quotient)
1794 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1795 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1796 /* The quotient is neg if exactly one of the divisor or dividend is
1797 neg. */
1798 if (dividend_neg != divisor_neg)
1799 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1800 quotient_len, dividend_prec,
1801 UNSIGNED, 0);
1804 if (remainder)
1806 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1807 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1808 /* The remainder is always the same sign as the dividend. */
1809 if (dividend_neg)
1810 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1811 *remainder_len, dividend_prec,
1812 UNSIGNED, 0);
1815 return quotient_len;
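/* The division truncates towards zero, as in C.  For example, with
   SGN == SIGNED, dividing 7 by -2 gives quotient -3 and remainder 1,
   while dividing -7 by 2 gives quotient -3 and remainder -1: the
   remainder always takes the sign of the dividend.  */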
1819 * Shifting, rotating and extraction.
1822 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1823 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1824 unsigned int
1825 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1826 unsigned int xlen, unsigned int precision,
1827 unsigned int shift)
1829 /* Split the shift into a whole-block shift and a subblock shift. */
1830 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1831 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1833 /* The whole-block shift fills with zeros. */
1834 unsigned int len = BLOCKS_NEEDED (precision);
1835 for (unsigned int i = 0; i < skip; ++i)
1836 val[i] = 0;
1838 /* It's easier to handle the simple block case specially. */
1839 if (small_shift == 0)
1840 for (unsigned int i = skip; i < len; ++i)
1841 val[i] = safe_uhwi (xval, xlen, i - skip);
1842 else
1844 /* The first unfilled output block is a left shift of the first
1845 block in XVAL. The other output blocks contain bits from two
1846 consecutive input blocks. */
1847 unsigned HOST_WIDE_INT carry = 0;
1848 for (unsigned int i = skip; i < len; ++i)
1850 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1851 val[i] = (x << small_shift) | carry;
1852 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1855 return canonize (val, len, precision);
1858 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1859 number of blocks in VAL. The input has XPRECISION bits and the
1860 output has XPRECISION - SHIFT bits. */
1861 static unsigned int
1862 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1863 unsigned int xlen, unsigned int xprecision,
1864 unsigned int shift)
1866 /* Split the shift into a whole-block shift and a subblock shift. */
1867 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1868 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1870 /* Work out how many blocks are needed to store the significant bits
1871 (excluding the upper zeros or signs). */
1872 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1874 /* It's easier to handle the simple block case specially. */
1875 if (small_shift == 0)
1876 for (unsigned int i = 0; i < len; ++i)
1877 val[i] = safe_uhwi (xval, xlen, i + skip);
1878 else
1880 /* Each output block but the last is a combination of two input blocks.
1881 The last block is a right shift of the last block in XVAL. */
1882 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1883 for (unsigned int i = 0; i < len; ++i)
1885 val[i] = curr >> small_shift;
1886 curr = safe_uhwi (xval, xlen, i + skip + 1);
1887 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1890 return len;
1893 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1894 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1895 VAL has PRECISION bits. */
1896 unsigned int
1897 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1898 unsigned int xlen, unsigned int xprecision,
1899 unsigned int precision, unsigned int shift)
1901 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1903 /* The value we just created has precision XPRECISION - SHIFT.
1904 Zero-extend it to wider precisions. */
1905 if (precision > xprecision - shift)
1907 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1908 if (small_prec)
1909 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1910 else if (val[len - 1] < 0)
1912 /* Add a new block with a zero. */
1913 val[len++] = 0;
1914 return len;
1917 return canonize (val, len, precision);
1920 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1921 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1922 VAL has PRECISION bits. */
1923 unsigned int
1924 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1925 unsigned int xlen, unsigned int xprecision,
1926 unsigned int precision, unsigned int shift)
1928 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1930 /* The value we just created has precision XPRECISION - SHIFT.
1931 Sign-extend it to wider types. */
1932 if (precision > xprecision - shift)
1934 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1935 if (small_prec)
1936 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1938 return canonize (val, len, precision);
1941 /* Return the number of leading (upper) zeros in X. */
1943 wi::clz (const wide_int_ref &x)
1945 /* Calculate how many bits there are above the highest represented block. */
1946 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1948 unsigned HOST_WIDE_INT high = x.uhigh ();
1949 if (count < 0)
1950 /* The upper -COUNT bits of HIGH are not part of the value.
1951 Clear them out. */
1952 high = (high << -count) >> -count;
1953 else if (x.sign_mask () < 0)
1954 /* The upper bit is set, so there are no leading zeros. */
1955 return 0;
1957 /* We don't need to look below HIGH. Either HIGH is nonzero,
1958 or the top bit of the block below is nonzero; clz_hwi is
1959 HOST_BITS_PER_WIDE_INT in the latter case. */
1960 return count + clz_hwi (high);
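/* For example, clz of the value 1 at precision 32 is 31, while clz of a
   32-bit value whose top bit is set is 0; bits above the precision are
   never counted.  */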
1963 /* Return the number of redundant sign bits in X. (That is, the number
1964 of bits immediately below the sign bit that have the same value as
1965 the sign bit.) */
1967 wi::clrsb (const wide_int_ref &x)
1969 /* Calculate how many bits there are above the highest represented block. */
1970 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1972 unsigned HOST_WIDE_INT high = x.uhigh ();
1973 unsigned HOST_WIDE_INT mask = -1;
1974 if (count < 0)
1976 /* The upper -COUNT bits of HIGH are not part of the value.
1977 Clear them from both MASK and HIGH. */
1978 mask >>= -count;
1979 high &= mask;
1982 /* If the top bit is 1, count the number of leading 1s. If the top
1983 bit is zero, count the number of leading zeros. */
1984 if (high > mask / 2)
1985 high ^= mask;
1987 /* There are no sign bits below the top block, so we don't need to look
1988 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
1989 HIGH is 0. */
1990 return count + clz_hwi (high) - 1;
1993 /* Return the number of trailing (lower) zeros in X. */
1995 wi::ctz (const wide_int_ref &x)
1997 if (x.len == 1 && x.ulow () == 0)
1998 return x.precision;
2000 /* Having dealt with the zero case, there must be a block with a
2001 nonzero bit. We don't care about the bits above the first 1. */
2002 unsigned int i = 0;
2003 while (x.val[i] == 0)
2004 ++i;
2005 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2008 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2009 return -1. */
2011 wi::exact_log2 (const wide_int_ref &x)
2013 /* Reject cases where there are implicit -1 blocks above HIGH. */
2014 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2015 return -1;
2017 /* Set CRUX to the index of the entry that should be nonzero.
2018 If the top block is zero then the next lowest block (if any)
2019 must have the high bit set. */
2020 unsigned int crux = x.len - 1;
2021 if (crux > 0 && x.val[crux] == 0)
2022 crux -= 1;
2024 /* Check that all lower blocks are zero. */
2025 for (unsigned int i = 0; i < crux; ++i)
2026 if (x.val[i] != 0)
2027 return -1;
2029 /* Get a zero-extended form of block CRUX. */
2030 unsigned HOST_WIDE_INT hwi = x.val[crux];
2031 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2032 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2034 /* Now it's down to whether HWI is a power of 2. */
2035 int res = ::exact_log2 (hwi);
2036 if (res >= 0)
2037 res += crux * HOST_BITS_PER_WIDE_INT;
2038 return res;
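/* For example, exact_log2 of 8 is 3, exact_log2 of 6 is -1, and
   exact_log2 of 0 is -1.  */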
2041 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2043 wi::floor_log2 (const wide_int_ref &x)
2045 return x.precision - 1 - clz (x);
2048 /* Return the index of the first (lowest) set bit in X, counting from 1.
2049 Return 0 if X is 0. */
2051 wi::ffs (const wide_int_ref &x)
2053 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2056 /* Return true if sign-extending X to have precision PRECISION would give
2057 the minimum signed value at that precision. */
2058 bool
2059 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2061 return ctz (x) + 1 == int (precision);
2064 /* Return true if X represents the minimum signed value. */
2065 bool
2066 wi::only_sign_bit_p (const wide_int_ref &x)
2068 return only_sign_bit_p (x, x.precision);
2072 * Private utilities.
2075 void gt_ggc_mx (widest_int *) { }
2076 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2077 void gt_pch_nx (widest_int *) { }
2079 template void wide_int::dump () const;
2080 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2081 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2082 template void offset_int::dump () const;
2083 template void widest_int::dump () const;