gcc/wide-int.cc
1 /* Operations with very long integers.
2 Copyright (C) 2012-2013 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hwint.h"
26 #include "wide-int.h"
27 #include "tree.h"
28 #include "dumpfile.h"
30 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
31 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__))
32 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
33 typedef unsigned HOST_WIDE_INT UWtype;
34 typedef unsigned int UQItype __attribute__ ((mode (QI)));
35 typedef unsigned int USItype __attribute__ ((mode (SI)));
36 typedef unsigned int UDItype __attribute__ ((mode (DI)));
37 #if W_TYPE_SIZE == 32
38 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
39 #else
40 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
41 #endif
42 #include "longlong.h"
43 #endif
45 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
48 * Internal utilities.
51 /* Quantities to deal with values that hold half of a wide int. Used
52 in multiply and divide. */
53 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
55 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
56 #define BLOCKS_NEEDED(PREC) \
57 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
58 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
60 /* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
61 based on the top existing bit of VAL. */
63 static unsigned HOST_WIDE_INT
64 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
66 return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
69 /* Convert the integer in VAL to canonical form, returning its new length.
70 LEN is the number of blocks currently in VAL and PRECISION is the number
71 of bits in the integer it represents.
73 This function only changes the representation, not the value. */
74 static unsigned int
75 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
77 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
78 HOST_WIDE_INT top;
79 int i;
81 if (len > blocks_needed)
82 len = blocks_needed;
84 if (len == 1)
85 return len;
87 top = val[len - 1];
88 if (len * HOST_BITS_PER_WIDE_INT > precision)
89 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
90 if (top != 0 && top != (HOST_WIDE_INT)-1)
91 return len;
93 /* At this point we know that the top is either 0 or -1. Find the
94 first block that is not a copy of this. */
95 for (i = len - 2; i >= 0; i--)
97 HOST_WIDE_INT x = val[i];
98 if (x != top)
100 if (SIGN_MASK (x) == top)
101 return i + 1;
103 /* We need an extra block because the top bit of block I does
104 not match the extension. */
105 return i + 2;
109 /* The number is 0 or -1. */
110 return 1;
114 * Conversion routines in and out of wide_int.
117 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
118 result for an integer with precision PRECISION. Return the length
119 of VAL (after any canonization). */
120 unsigned int
121 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
122 unsigned int xlen, unsigned int precision, bool need_canon)
124 for (unsigned i = 0; i < xlen; i++)
125 val[i] = xval[i];
126 return need_canon ? canonize (val, xlen, precision) : xlen;
129 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
130 read according to the byte endianness and word endianness of the target.
131 Only the lower BUFFER_LEN bytes of the result are set; the remaining
132 high bytes are cleared. */
133 wide_int
134 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
136 unsigned int precision = buffer_len * BITS_PER_UNIT;
137 wide_int result = wide_int::create (precision);
138 unsigned int words = buffer_len / UNITS_PER_WORD;
140 /* We have to clear all the bits ourselves, as we merely OR in values
141 below. */
142 unsigned int len = BLOCKS_NEEDED (precision);
143 HOST_WIDE_INT *val = result.write_val ();
144 for (unsigned int i = 0; i < len; ++i)
145 val[i] = 0;
147 for (unsigned int byte = 0; byte < buffer_len; byte++)
149 unsigned int offset;
150 unsigned int index;
151 unsigned int bitpos = byte * BITS_PER_UNIT;
152 unsigned HOST_WIDE_INT value;
154 if (buffer_len > UNITS_PER_WORD)
156 unsigned int word = byte / UNITS_PER_WORD;
158 if (WORDS_BIG_ENDIAN)
159 word = (words - 1) - word;
161 offset = word * UNITS_PER_WORD;
163 if (BYTES_BIG_ENDIAN)
164 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
165 else
166 offset += byte % UNITS_PER_WORD;
168 else
169 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
171 value = (unsigned HOST_WIDE_INT) buffer[offset];
173 index = bitpos / HOST_BITS_PER_WIDE_INT;
174 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
177 result.set_len (canonize (val, len, precision));
179 return result;
182 /* Set RESULT from X; the sign is taken according to SGN. */
183 void
184 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
186 int len = x.get_len ();
187 const HOST_WIDE_INT *v = x.get_val ();
188 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
190 if (wi::neg_p (x, sgn))
192 /* We use ones' complement to avoid the -0x80..0 edge case that
193 negation won't work on. */
194 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
195 for (int i = 0; i < len; i++)
196 t[i] = ~v[i];
197 if (excess > 0)
198 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
199 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
200 mpz_com (result, result);
202 else if (excess > 0)
204 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
205 for (int i = 0; i < len - 1; i++)
206 t[i] = v[i];
207 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
208 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
210 else
211 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
214 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
215 values of X will be wrapped; otherwise, they will be set to the
216 appropriate minimum or maximum TYPE bound. */
217 wide_int
218 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
220 size_t count, numb;
221 int prec = TYPE_PRECISION (type);
222 wide_int res = wide_int::create (prec);
224 if (!wrap)
226 mpz_t min, max;
228 mpz_init (min);
229 mpz_init (max);
230 get_type_static_bounds (type, min, max);
232 if (mpz_cmp (x, min) < 0)
233 mpz_set (x, min);
234 else if (mpz_cmp (x, max) > 0)
235 mpz_set (x, max);
237 mpz_clear (min);
238 mpz_clear (max);
241 /* Determine the number of unsigned HOST_WIDE_INTs that are required
242 for representing the value. The code to calculate count is
243 extracted from the GMP manual, section "Integer Import and Export":
244 http://gmplib.org/manual/Integer-Import-and-Export.html */
245 numb = 8 * sizeof(HOST_WIDE_INT);
246 count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
247 HOST_WIDE_INT *val = res.write_val ();
248 mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
249 if (count < 1)
251 val[0] = 0;
252 count = 1;
254 res.set_len (count);
256 if (mpz_sgn (x) < 0)
257 res = -res;
259 return res;
263 * Largest and smallest values in a mode.
266 /* Return the largest SGNed number that is representable in PRECISION bits.
268 TODO: There is still code from the double_int era that tries to
269 make up for the fact that double_ints could not represent the
270 min and max values of all types. This code should be removed
271 because the min and max values can always be represented in
272 wide_ints and int-csts. */
273 wide_int
274 wi::max_value (unsigned int precision, signop sgn)
276 gcc_checking_assert (precision != 0);
277 if (sgn == UNSIGNED)
278 /* The unsigned max is just all ones. */
279 return shwi (-1, precision);
280 else
281 /* The signed max is all ones except the top bit. This must be
282 explicitly represented. */
283 return mask (precision - 1, false, precision);
286 /* Return the smallest SGNed number that is representable in PRECISION bits. */
287 wide_int
288 wi::min_value (unsigned int precision, signop sgn)
290 gcc_checking_assert (precision != 0);
291 if (sgn == UNSIGNED)
292 return uhwi (0, precision);
293 else
294 /* The signed min is all zeros except the top bit. This must be
295 explicitly represented. */
296 return wi::set_bit_in_zero (precision - 1, precision);
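/* Worked examples at an 8-bit precision (illustrative only):

     wi::max_value (8, UNSIGNED)	0xff   (all ones)
     wi::max_value (8, SIGNED)		0x7f   (mask (7, false, 8))
     wi::min_value (8, UNSIGNED)	0
     wi::min_value (8, SIGNED)		-0x80  (set_bit_in_zero (7, 8))  */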
300 * Public utilities.
303 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
304 signedness SGN, to an integer that has PRECISION bits. Store the blocks
305 in VAL and return the number of blocks used.
307 This function can handle both extension (PRECISION > XPRECISION)
308 and truncation (PRECISION < XPRECISION). */
309 unsigned int
310 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
311 unsigned int xlen, unsigned int xprecision,
312 unsigned int precision, signop sgn)
314 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
315 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
316 for (unsigned i = 0; i < len; i++)
317 val[i] = xval[i];
319 if (precision > xprecision)
321 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
323 /* Expanding. */
324 if (sgn == UNSIGNED)
326 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
327 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
328 else if (val[len - 1] < 0)
330 while (len < BLOCKS_NEEDED (xprecision))
331 val[len++] = -1;
332 if (small_xprecision)
333 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
334 else
335 val[len++] = 0;
338 else
340 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
341 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
344 len = canonize (val, len, precision);
346 return len;
349 /* This function hides the fact that we cannot rely on the bits beyond
350 the precision. This issue comes up in the relational comparisons
351 where we do allow comparisons of values of different precisions. */
352 static inline HOST_WIDE_INT
353 selt (const HOST_WIDE_INT *a, unsigned int len,
354 unsigned int blocks_needed, unsigned int small_prec,
355 unsigned int index, signop sgn)
357 HOST_WIDE_INT val;
358 if (index < len)
359 val = a[index];
360 else if (index < blocks_needed || sgn == SIGNED)
361 /* Signed or within the precision. */
362 val = SIGN_MASK (a[len - 1]);
363 else
364 /* Unsigned extension beyond the precision. */
365 val = 0;
367 if (small_prec && index == blocks_needed - 1)
368 return (sgn == SIGNED
369 ? sext_hwi (val, small_prec)
370 : zext_hwi (val, small_prec));
371 else
372 return val;
375 /* Return the value of the highest bit represented in a wide int,
376 which in general equals the sign bit. */
377 static inline HOST_WIDE_INT
378 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
380 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
381 unsigned HOST_WIDE_INT val = a[len - 1];
382 if (excess > 0)
383 val <<= excess;
384 return val >> (HOST_BITS_PER_WIDE_INT - 1);
388 * Comparisons. Note that only equality is an operator. The other
389 * comparisons cannot be operators since they are inherently signed or
390 * unsigned and C++ has no such operators.
393 /* Return true if OP0 == OP1. */
394 bool
395 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
396 const HOST_WIDE_INT *op1, unsigned int op1len,
397 unsigned int prec)
399 int l0 = op0len - 1;
400 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
402 if (op0len != op1len)
403 return false;
405 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
407 /* It does not matter if we zext or sext here, we just have to
408 do both the same way. */
409 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
410 return false;
411 l0--;
414 while (l0 >= 0)
415 if (op0[l0] != op1[l0])
416 return false;
417 else
418 l0--;
420 return true;
423 /* Return true if OP0 < OP1 using signed comparisons. */
424 bool
425 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
426 unsigned int precision,
427 const HOST_WIDE_INT *op1, unsigned int op1len)
429 HOST_WIDE_INT s0, s1;
430 unsigned HOST_WIDE_INT u0, u1;
431 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
432 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
433 int l = MAX (op0len - 1, op1len - 1);
435 /* Only the top block is compared as signed. The rest are unsigned
436 comparisons. */
437 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
438 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
439 if (s0 < s1)
440 return true;
441 if (s0 > s1)
442 return false;
444 l--;
445 while (l >= 0)
447 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
448 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
450 if (u0 < u1)
451 return true;
452 if (u0 > u1)
453 return false;
454 l--;
457 return false;
460 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
461 signed compares. */
462 int
463 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
464 unsigned int precision,
465 const HOST_WIDE_INT *op1, unsigned int op1len)
467 HOST_WIDE_INT s0, s1;
468 unsigned HOST_WIDE_INT u0, u1;
469 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
470 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
471 int l = MAX (op0len - 1, op1len - 1);
473 /* Only the top block is compared as signed. The rest are unsigned
474 comparisons. */
475 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
476 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
477 if (s0 < s1)
478 return -1;
479 if (s0 > s1)
480 return 1;
482 l--;
483 while (l >= 0)
485 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
486 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
488 if (u0 < u1)
489 return -1;
490 if (u0 > u1)
491 return 1;
492 l--;
495 return 0;
498 /* Return true if OP0 < OP1 using unsigned comparisons. */
499 bool
500 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
501 unsigned int precision,
502 const HOST_WIDE_INT *op1, unsigned int op1len)
504 unsigned HOST_WIDE_INT x0;
505 unsigned HOST_WIDE_INT x1;
506 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
507 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
508 int l = MAX (op0len - 1, op1len - 1);
510 while (l >= 0)
512 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
513 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
514 if (x0 < x1)
515 return true;
516 if (x0 > x1)
517 return false;
518 l--;
521 return false;
524 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
525 unsigned compares. */
526 int
527 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
528 unsigned int precision,
529 const HOST_WIDE_INT *op1, unsigned int op1len)
531 unsigned HOST_WIDE_INT x0;
532 unsigned HOST_WIDE_INT x1;
533 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
534 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
535 int l = MAX (op0len - 1, op1len - 1);
537 while (l >= 0)
539 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
540 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
541 if (x0 < x1)
542 return -1;
543 if (x0 > x1)
544 return 1;
545 l--;
548 return 0;
552 * Extension.
555 /* Sign-extend the number represented by XVAL and XLEN into VAL,
556 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
557 and VAL have PRECISION bits. */
558 unsigned int
559 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
560 unsigned int xlen, unsigned int precision, unsigned int offset)
562 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
563 /* Extending beyond the precision is a no-op. If we have only stored
564 OFFSET bits or fewer, the rest are already signs. */
565 if (offset >= precision || len >= xlen)
567 for (unsigned i = 0; i < xlen; ++i)
568 val[i] = xval[i];
569 return xlen;
571 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
572 for (unsigned int i = 0; i < len; i++)
573 val[i] = xval[i];
574 if (suboffset > 0)
576 val[len] = sext_hwi (xval[len], suboffset);
577 len += 1;
579 return canonize (val, len, precision);
582 /* Zero-extend the number represented by XVAL and XLEN into VAL,
583 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
584 and VAL have PRECISION bits. */
585 unsigned int
586 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
587 unsigned int xlen, unsigned int precision, unsigned int offset)
589 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
590 /* Extending beyond the precision is a no-op. If we have only stored
591 OFFSET bits or fewer, and the upper stored bit is zero, then there
592 is nothing to do. */
593 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
595 for (unsigned i = 0; i < xlen; ++i)
596 val[i] = xval[i];
597 return xlen;
599 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
600 for (unsigned int i = 0; i < len; i++)
601 val[i] = i < xlen ? xval[i] : -1;
602 if (suboffset > 0)
603 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
604 else
605 val[len] = 0;
606 return canonize (val, len + 1, precision);
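/* Extension sketches (illustrative only).  With precision 16 and the
   single block 0xab, extending from OFFSET 8 treats bit 7 as the
   boundary:

     HOST_WIDE_INT x = 0xab;
     HOST_WIDE_INT v[WIDE_INT_MAX_ELTS];
     wi::sext_large (v, &x, 1, 16, 8);	/* v[0] == -0x55, i.e. 0xffab  */
     wi::zext_large (v, &x, 1, 16, 8);	/* v[0] == 0xab  */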
610 * Masking, inserting, shifting, rotating.
613 /* Insert WIDTH bits from Y into X starting at START. */
614 wide_int
615 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
616 unsigned int width)
618 wide_int result;
619 wide_int mask;
620 wide_int tmp;
622 unsigned int precision = x.get_precision ();
623 if (start >= precision)
624 return x;
626 gcc_checking_assert (precision >= width);
628 if (start + width >= precision)
629 width = precision - start;
631 mask = wi::shifted_mask (start, width, false, precision);
632 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
633 result = tmp & mask;
635 tmp = wi::bit_and_not (x, mask);
636 result = result | tmp;
638 return result;
641 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
642 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
643 bits. */
644 unsigned int
645 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
646 unsigned int xlen, unsigned int precision, unsigned int bit)
648 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
649 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
651 if (block + 1 >= xlen)
653 /* The operation either affects the last current block or needs
654 a new block. */
655 unsigned int len = block + 1;
656 for (unsigned int i = 0; i < len; i++)
657 val[i] = safe_uhwi (xval, xlen, i);
658 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
660 /* If the bit we just set is at the msb of the block, make sure
661 that any higher bits are zeros. */
662 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
663 val[len++] = 0;
664 return len;
666 else
668 for (unsigned int i = 0; i < xlen; i++)
669 val[i] = xval[i];
670 val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
671 return canonize (val, xlen, precision);
675 /* bswap THIS. */
676 wide_int
677 wide_int_storage::bswap () const
679 wide_int result = wide_int::create (precision);
680 unsigned int i, s;
681 unsigned int len = BLOCKS_NEEDED (precision);
682 unsigned int xlen = get_len ();
683 const HOST_WIDE_INT *xval = get_val ();
684 HOST_WIDE_INT *val = result.write_val ();
686 /* This is not a well-defined operation if the precision is not a
687 multiple of 8. */
688 gcc_assert ((precision & 0x7) == 0);
690 for (i = 0; i < len; i++)
691 val[i] = 0;
693 /* Only swap the bytes that are not the padding. */
694 for (s = 0; s < precision; s += 8)
696 unsigned int d = precision - s - 8;
697 unsigned HOST_WIDE_INT byte;
699 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
700 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
702 byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
704 block = d / HOST_BITS_PER_WIDE_INT;
705 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
707 val[block] |= byte << offset;
710 result.set_len (canonize (val, len, precision));
711 return result;
714 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
715 above that up to PREC are zeros. The result is inverted if NEGATE
716 is true. Return the number of blocks in VAL. */
717 unsigned int
718 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
719 unsigned int prec)
721 if (width >= prec)
723 val[0] = negate ? 0 : -1;
724 return 1;
726 else if (width == 0)
728 val[0] = negate ? -1 : 0;
729 return 1;
732 unsigned int i = 0;
733 while (i < width / HOST_BITS_PER_WIDE_INT)
734 val[i++] = negate ? 0 : -1;
736 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
737 if (shift != 0)
739 HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
740 val[i++] = negate ? ~last : last;
742 else
743 val[i++] = negate ? -1 : 0;
745 return i;
748 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
749 bits are ones, and the bits above that up to PREC are zeros. The result
750 is inverted if NEGATE is true. Return the number of blocks in VAL. */
751 unsigned int
752 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
753 bool negate, unsigned int prec)
755 if (start >= prec || width == 0)
757 val[0] = negate ? -1 : 0;
758 return 1;
761 if (width > prec - start)
762 width = prec - start;
763 unsigned int end = start + width;
765 unsigned int i = 0;
766 while (i < start / HOST_BITS_PER_WIDE_INT)
767 val[i++] = negate ? -1 : 0;
769 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
770 if (shift)
772 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
773 shift += width;
774 if (shift < HOST_BITS_PER_WIDE_INT)
776 /* case 000111000 */
777 block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
778 val[i++] = negate ? ~block : block;
779 return i;
781 else
782 /* ...111000 */
783 val[i++] = negate ? block : ~block;
786 while (i < end / HOST_BITS_PER_WIDE_INT)
787 /* 1111111 */
788 val[i++] = negate ? 0 : -1;
790 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
791 if (shift != 0)
793 /* 000011111 */
794 HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
795 val[i++] = negate ? ~block : block;
797 else if (end < prec)
798 val[i++] = negate ? -1 : 0;
800 return i;
804 * Logical operations.
807 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
808 unsigned int
809 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
810 unsigned int op0len, const HOST_WIDE_INT *op1,
811 unsigned int op1len, unsigned int prec)
813 int l0 = op0len - 1;
814 int l1 = op1len - 1;
815 bool need_canon = true;
817 unsigned int len = MAX (op0len, op1len);
818 if (l0 > l1)
820 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
821 if (op1mask == 0)
823 l0 = l1;
824 len = l1 + 1;
826 else
828 need_canon = false;
829 while (l0 > l1)
831 val[l0] = op0[l0];
832 l0--;
836 else if (l1 > l0)
838 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
839 if (op0mask == 0)
840 len = l0 + 1;
841 else
843 need_canon = false;
844 while (l1 > l0)
846 val[l1] = op1[l1];
847 l1--;
852 while (l0 >= 0)
854 val[l0] = op0[l0] & op1[l0];
855 l0--;
858 if (need_canon)
859 len = canonize (val, len, prec);
861 return len;
864 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
865 unsigned int
866 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
867 unsigned int op0len, const HOST_WIDE_INT *op1,
868 unsigned int op1len, unsigned int prec)
870 wide_int result;
871 int l0 = op0len - 1;
872 int l1 = op1len - 1;
873 bool need_canon = true;
875 unsigned int len = MAX (op0len, op1len);
876 if (l0 > l1)
878 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
879 if (op1mask != 0)
881 l0 = l1;
882 len = l1 + 1;
884 else
886 need_canon = false;
887 while (l0 > l1)
889 val[l0] = op0[l0];
890 l0--;
894 else if (l1 > l0)
896 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
897 if (op0mask == 0)
898 len = l0 + 1;
899 else
901 need_canon = false;
902 while (l1 > l0)
904 val[l1] = ~op1[l1];
905 l1--;
910 while (l0 >= 0)
912 val[l0] = op0[l0] & ~op1[l0];
913 l0--;
916 if (need_canon)
917 len = canonize (val, len, prec);
919 return len;
922 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
923 unsigned int
924 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
925 unsigned int op0len, const HOST_WIDE_INT *op1,
926 unsigned int op1len, unsigned int prec)
928 wide_int result;
929 int l0 = op0len - 1;
930 int l1 = op1len - 1;
931 bool need_canon = true;
933 unsigned int len = MAX (op0len, op1len);
934 if (l0 > l1)
936 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
937 if (op1mask != 0)
939 l0 = l1;
940 len = l1 + 1;
942 else
944 need_canon = false;
945 while (l0 > l1)
947 val[l0] = op0[l0];
948 l0--;
952 else if (l1 > l0)
954 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
955 if (op0mask != 0)
956 len = l0 + 1;
957 else
959 need_canon = false;
960 while (l1 > l0)
962 val[l1] = op1[l1];
963 l1--;
968 while (l0 >= 0)
970 val[l0] = op0[l0] | op1[l0];
971 l0--;
974 if (need_canon)
975 len = canonize (val, len, prec);
977 return len;
980 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
981 unsigned int
982 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
983 unsigned int op0len, const HOST_WIDE_INT *op1,
984 unsigned int op1len, unsigned int prec)
986 wide_int result;
987 int l0 = op0len - 1;
988 int l1 = op1len - 1;
989 bool need_canon = true;
991 unsigned int len = MAX (op0len, op1len);
992 if (l0 > l1)
994 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
995 if (op1mask == 0)
997 l0 = l1;
998 len = l1 + 1;
1000 else
1002 need_canon = false;
1003 while (l0 > l1)
1005 val[l0] = op0[l0];
1006 l0--;
1010 else if (l1 > l0)
1012 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1013 if (op0mask != 0)
1014 len = l0 + 1;
1015 else
1017 need_canon = false;
1018 while (l1 > l0)
1020 val[l1] = ~op1[l1];
1021 l1--;
1026 while (l0 >= 0)
1028 val[l0] = op0[l0] | ~op1[l0];
1029 l0--;
1032 if (need_canon)
1033 len = canonize (val, len, prec);
1035 return len;
1038 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1039 unsigned int
1040 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1041 unsigned int op0len, const HOST_WIDE_INT *op1,
1042 unsigned int op1len, unsigned int prec)
1044 wide_int result;
1045 int l0 = op0len - 1;
1046 int l1 = op1len - 1;
1048 unsigned int len = MAX (op0len, op1len);
1049 if (l0 > l1)
1051 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1052 while (l0 > l1)
1054 val[l0] = op0[l0] ^ op1mask;
1055 l0--;
1059 if (l1 > l0)
1061 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1062 while (l1 > l0)
1064 val[l1] = op0mask ^ op1[l1];
1065 l1--;
1069 while (l0 >= 0)
1071 val[l0] = op0[l0] ^ op1[l0];
1072 l0--;
1075 return canonize (val, len, prec);
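/* A note on the pattern shared by the logical routines above
   (illustrative only).  Blocks beyond an operand's length are implicit
   copies of its sign, so they are materialized on demand as 0 or -1
   masks via top_bit_of rather than stored.  For 128-bit operands
   A = { 5 } (len 1, positive) and B = { -1, 7 } (len 2):

     A & B: A's implicit high block is 0, so the result is just { 5 }.
     A | B: the high block is 0 | 7 == 7 and the low is 5 | -1 == -1,
	    giving { -1, 7 }.  */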
1079 * Math.
1082 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1083 whether the result overflows when OP0 and OP1 are treated as having
1084 signedness SGN. Return the number of blocks in VAL. */
1085 unsigned int
1086 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1087 unsigned int op0len, const HOST_WIDE_INT *op1,
1088 unsigned int op1len, unsigned int prec,
1089 signop sgn, bool *overflow)
1091 unsigned HOST_WIDE_INT o0 = 0;
1092 unsigned HOST_WIDE_INT o1 = 0;
1093 unsigned HOST_WIDE_INT x = 0;
1094 unsigned HOST_WIDE_INT carry = 0;
1095 unsigned HOST_WIDE_INT old_carry = 0;
1096 unsigned HOST_WIDE_INT mask0, mask1;
1097 unsigned int i;
1099 unsigned int len = MAX (op0len, op1len);
1100 mask0 = -top_bit_of (op0, op0len, prec);
1101 mask1 = -top_bit_of (op1, op1len, prec);
1102 /* Add all of the explicitly defined elements. */
1104 for (i = 0; i < len; i++)
1106 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1107 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1108 x = o0 + o1 + carry;
1109 val[i] = x;
1110 old_carry = carry;
1111 carry = carry == 0 ? x < o0 : x <= o0;
1114 if (len * HOST_BITS_PER_WIDE_INT < prec)
1116 val[len] = mask0 + mask1 + carry;
1117 len++;
1118 if (overflow)
1119 *overflow = false;
1121 else if (overflow)
1123 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1124 if (sgn == SIGNED)
1126 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1127 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1129 else
1131 /* Put the MSBs of X and O0 in the top of the HWI. */
1132 x <<= shift;
1133 o0 <<= shift;
1134 if (old_carry)
1135 *overflow = (x <= o0);
1136 else
1137 *overflow = (x < o0);
1141 return canonize (val, len, prec);
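/* A minimal standalone sketch of the signed-overflow test used above
   (illustrative only; the helper name is made up).  The sum overflows
   iff its sign differs from the signs of both operands, which is
   exactly the (val ^ o0) & (val ^ o1) check:

     static bool
     sadd_overflows (HOST_WIDE_INT a, HOST_WIDE_INT b)
     {
       unsigned HOST_WIDE_INT s = (unsigned HOST_WIDE_INT) a + b;
       return ((HOST_WIDE_INT) ((s ^ a) & (s ^ b))) < 0;
     }

   The unsigned rule is simpler: the truncated sum overflowed iff it is
   smaller than an operand (or no larger, when a carry came in).  */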
1144 /* Subroutines of the multiplication and division operations. Unpack
1145 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1146 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1147 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1148 static void
1149 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1150 unsigned int in_len, unsigned int out_len,
1151 unsigned int prec, signop sgn)
1153 unsigned int i;
1154 unsigned int j = 0;
1155 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1156 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1157 HOST_WIDE_INT mask;
1159 if (sgn == SIGNED)
1161 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1162 mask &= HALF_INT_MASK;
1164 else
1165 mask = 0;
1167 for (i = 0; i < blocks_needed - 1; i++)
1169 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1170 result[j++] = x;
1171 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1174 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1175 if (small_prec)
1177 if (sgn == SIGNED)
1178 x = sext_hwi (x, small_prec);
1179 else
1180 x = zext_hwi (x, small_prec);
1182 result[j++] = x;
1183 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1185 /* Smear the sign bit. */
1186 while (j < out_len)
1187 result[j++] = mask;
1190 /* The inverse of wi_unpack. IN_LEN is the number of input
1191 blocks. The number of output blocks will be half this amount. */
1192 static void
1193 wi_pack (unsigned HOST_WIDE_INT *result,
1194 const unsigned HOST_HALF_WIDE_INT *input,
1195 unsigned int in_len)
1197 unsigned int i = 0;
1198 unsigned int j = 0;
1200 while (i + 2 < in_len)
1202 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1203 | ((unsigned HOST_WIDE_INT)input[i + 1]
1204 << HOST_BITS_PER_HALF_WIDE_INT);
1205 i += 2;
1208 /* Handle the case where in_len is odd. For this we zero extend. */
1209 if (in_len & 1)
1210 result[j++] = (unsigned HOST_WIDE_INT)input[i];
1211 else
1212 result[j++] = (unsigned HOST_WIDE_INT)input[i]
1213 | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
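/* A round-trip sketch assuming 64-bit HWIs (illustrative only):

     HOST_WIDE_INT x = (HOST_WIDE_INT) 0x0123456789abcdefLL;
     unsigned HOST_HALF_WIDE_INT h[2];
     unsigned HOST_WIDE_INT back;
     wi_unpack (h, &x, 1, 2, 64, UNSIGNED);
     /* h[0] == 0x89abcdef, h[1] == 0x01234567 (low half first).  */
     wi_pack (&back, h, 2);
     /* back == 0x0123456789abcdef again.  */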
1216 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1217 result is returned.
1219 If HIGH is not set, throw away the upper half after the check is
1220 made to see if it overflows. Unfortunately there is no better way
1221 to check for overflow than to do this. If OVERFLOW is nonnull,
1222 record in *OVERFLOW whether the result overflowed. SGN controls
1223 the signedness and is used to check overflow or if HIGH is set. */
1224 unsigned int
1225 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1226 unsigned int op1len, const HOST_WIDE_INT *op2val,
1227 unsigned int op2len, unsigned int prec, signop sgn,
1228 bool *overflow, bool high)
1230 unsigned HOST_WIDE_INT o0, o1, k, t;
1231 unsigned int i;
1232 unsigned int j;
1233 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1234 unsigned int half_blocks_needed = blocks_needed * 2;
1235 /* The sizes here are scaled to support a 2x largest mode by 2x
1236 largest mode yielding a 4x largest mode result. This is what is
1237 needed by vpn. */
1239 unsigned HOST_HALF_WIDE_INT
1240 u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1241 unsigned HOST_HALF_WIDE_INT
1242 v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1243 /* The '2' in 'R' is because we are internally doing a full
1244 multiply. */
1245 unsigned HOST_HALF_WIDE_INT
1246 r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1247 HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1249 /* If the top level routine did not really pass in an overflow, then
1250 just make sure that we never attempt to set it. */
1251 bool needs_overflow = (overflow != 0);
1252 if (needs_overflow)
1253 *overflow = false;
1255 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1256 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1258 /* This is a surprisingly common case, so do it first. */
1259 if (op1 == 0 || op2 == 0)
1261 val[0] = 0;
1262 return 1;
1265 #ifdef umul_ppmm
1266 if (sgn == UNSIGNED)
1268 /* If the inputs are single HWIs and the output has room for at
1269 least two HWIs, we can use umul_ppmm directly. */
1270 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1271 && wi::fits_uhwi_p (op1)
1272 && wi::fits_uhwi_p (op2))
1274 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1275 return 1 + (val[1] != 0 || val[0] < 0);
1277 /* Likewise if the output is a full single HWI, except that the
1278 upper HWI of the result is only used for determining overflow.
1279 (We handle this case inline when overflow isn't needed.) */
1280 else if (prec == HOST_BITS_PER_WIDE_INT)
1282 unsigned HOST_WIDE_INT upper;
1283 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1284 if (needs_overflow)
1285 *overflow = (upper != 0);
1286 return 1;
1289 #endif
1291 /* Handle multiplications by 1. */
1292 if (op1 == 1)
1294 for (i = 0; i < op2len; i++)
1295 val[i] = op2val[i];
1296 return op2len;
1298 if (op2 == 1)
1300 for (i = 0; i < op1len; i++)
1301 val[i] = op1val[i];
1302 return op1len;
1305 /* If we need to check for overflow, we can only do half wide
1306 multiplies quickly because we need to look at the top bits to
1307 check for the overflow. */
1308 if ((high || needs_overflow)
1309 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1311 unsigned HOST_WIDE_INT r;
1313 if (sgn == SIGNED)
1315 o0 = op1.to_shwi ();
1316 o1 = op2.to_shwi ();
1318 else
1320 o0 = op1.to_uhwi ();
1321 o1 = op2.to_uhwi ();
1324 r = o0 * o1;
1325 if (needs_overflow)
1327 if (sgn == SIGNED)
1329 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1330 *overflow = true;
1332 else
1334 if ((r >> prec) != 0)
1335 *overflow = true;
1338 val[0] = high ? r >> prec : r;
1339 return 1;
1342 /* We do unsigned mul and then correct it. */
1343 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1344 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1346 /* The 2 is for a full mult. */
1347 memset (r, 0, half_blocks_needed * 2
1348 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1350 for (j = 0; j < half_blocks_needed; j++)
1352 k = 0;
1353 for (i = 0; i < half_blocks_needed; i++)
1355 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1356 + r[i + j] + k);
1357 r[i + j] = t & HALF_INT_MASK;
1358 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1360 r[j + half_blocks_needed] = k;
1363 /* We did unsigned math above. For signed we must adjust the
1364 product (assuming we need to see that). */
1365 if (sgn == SIGNED && (high || needs_overflow))
1367 unsigned HOST_WIDE_INT b;
1368 if (wi::neg_p (op1))
1370 b = 0;
1371 for (i = 0; i < half_blocks_needed; i++)
1373 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1374 - (unsigned HOST_WIDE_INT)v[i] - b;
1375 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1376 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1379 if (wi::neg_p (op2))
1381 b = 0;
1382 for (i = 0; i < half_blocks_needed; i++)
1384 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1385 - (unsigned HOST_WIDE_INT)u[i] - b;
1386 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1387 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1392 if (needs_overflow)
1394 HOST_WIDE_INT top;
1396 /* For unsigned, overflow is true if any of the top bits are set.
1397 For signed, overflow is true if any of the top bits are not equal
1398 to the sign bit. */
1399 if (sgn == UNSIGNED)
1400 top = 0;
1401 else
1403 top = r[(half_blocks_needed) - 1];
1404 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1405 top &= mask;
1408 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1409 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1410 *overflow = true;
1413 if (high)
1415 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1416 wi_pack ((unsigned HOST_WIDE_INT *) val,
1417 &r[half_blocks_needed], half_blocks_needed);
1418 return canonize (val, blocks_needed, prec);
1420 else
1422 /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
1423 wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
1424 return canonize (val, blocks_needed, prec);
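/* A minimal standalone sketch of the half-word schoolbook multiply
   used above (illustrative only; the name is made up).  One full
   HWI x HWI -> double-HWI product built from four half products:

     static void
     umul_double (unsigned HOST_WIDE_INT a, unsigned HOST_WIDE_INT b,
		  unsigned HOST_WIDE_INT *lo, unsigned HOST_WIDE_INT *hi)
     {
       unsigned HOST_WIDE_INT al = a & HALF_INT_MASK;
       unsigned HOST_WIDE_INT ah = a >> HOST_BITS_PER_HALF_WIDE_INT;
       unsigned HOST_WIDE_INT bl = b & HALF_INT_MASK;
       unsigned HOST_WIDE_INT bh = b >> HOST_BITS_PER_HALF_WIDE_INT;
       unsigned HOST_WIDE_INT t0 = al * bl;
       unsigned HOST_WIDE_INT t1 = ah * bl + (t0 >> HOST_BITS_PER_HALF_WIDE_INT);
       unsigned HOST_WIDE_INT t2 = al * bh + (t1 & HALF_INT_MASK);
       *lo = (t2 << HOST_BITS_PER_HALF_WIDE_INT) | (t0 & HALF_INT_MASK);
       *hi = ah * bh + (t1 >> HOST_BITS_PER_HALF_WIDE_INT)
	     + (t2 >> HOST_BITS_PER_HALF_WIDE_INT);
     }

   The main loop above generalizes this to arbitrarily many half
   blocks, carrying K between columns.  */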
1428 /* Compute the population count of X. */
1429 int
1430 wi::popcount (const wide_int_ref &x)
1432 unsigned int i;
1433 int count;
1435 /* The high order block is special if it is the last block and the
1436 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1437 have to clear out any ones above the precision before doing
1438 popcount on this block. */
1439 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1440 unsigned int stop = x.len;
1441 if (count < 0)
1443 count = popcount_hwi (x.uhigh () << -count);
1444 stop -= 1;
1446 else
1448 if (x.sign_mask () >= 0)
1449 count = 0;
1452 for (i = 0; i < stop; ++i)
1453 count += popcount_hwi (x.val[i]);
1455 return count;
1458 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1459 whether the result overflows when OP0 and OP1 are treated as having
1460 signedness SGN. Return the number of blocks in VAL. */
1461 unsigned int
1462 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1463 unsigned int op0len, const HOST_WIDE_INT *op1,
1464 unsigned int op1len, unsigned int prec,
1465 signop sgn, bool *overflow)
1467 unsigned HOST_WIDE_INT o0 = 0;
1468 unsigned HOST_WIDE_INT o1 = 0;
1469 unsigned HOST_WIDE_INT x = 0;
1470 /* Subtract the elements directly, propagating a borrow from the low
1471 end. (Conceptually this is negate-and-add; the explicit borrow
1472 chain avoids materializing the negation.) */
1473 unsigned HOST_WIDE_INT borrow = 0;
1474 unsigned HOST_WIDE_INT old_borrow = 0;
1476 unsigned HOST_WIDE_INT mask0, mask1;
1477 unsigned int i;
1479 unsigned int len = MAX (op0len, op1len);
1480 mask0 = -top_bit_of (op0, op0len, prec);
1481 mask1 = -top_bit_of (op1, op1len, prec);
1483 /* Subtract all of the explicitly defined elements. */
1484 for (i = 0; i < len; i++)
1486 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1487 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1488 x = o0 - o1 - borrow;
1489 val[i] = x;
1490 old_borrow = borrow;
1491 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1494 if (len * HOST_BITS_PER_WIDE_INT < prec)
1496 val[len] = mask0 - mask1 - borrow;
1497 len++;
1498 if (overflow)
1499 *overflow = false;
1501 else if (overflow)
1503 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1504 if (sgn == SIGNED)
1506 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1507 *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1509 else
1511 /* Put the MSBs of X and O0 in the top of the HWI. */
1512 x <<= shift;
1513 o0 <<= shift;
1514 if (old_borrow)
1515 *overflow = (x >= o0);
1516 else
1517 *overflow = (x > o0);
1521 return canonize (val, len, prec);
1526 * Division and mod.
1529 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1530 algorithm is a small modification of the algorithm in Hacker's
1531 Delight by Warren, which itself is a small modification of Knuth's
1532 algorithm. M is the number of significant elements of B_DIVIDEND
1533 (there needs to be at least one extra element of B_DIVIDEND
1534 allocated); N is the number of elements of B_DIVISOR. */
1535 static void
1536 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1537 unsigned HOST_HALF_WIDE_INT *b_remainder,
1538 unsigned HOST_HALF_WIDE_INT *b_dividend,
1539 unsigned HOST_HALF_WIDE_INT *b_divisor,
1540 int m, int n)
1542 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1543 HOST_WIDE_INT and stored in the lower bits of each word. This
1544 algorithm should work properly on both 32 and 64 bit
1545 machines. */
1546 unsigned HOST_WIDE_INT b
1547 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1548 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1549 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1550 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1551 HOST_WIDE_INT t, k;
1552 int i, j, s;
1554 /* Single digit divisor. */
1555 if (n == 1)
1557 k = 0;
1558 for (j = m - 1; j >= 0; j--)
1560 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1561 k = ((k * b + b_dividend[j])
1562 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1563 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1565 b_remainder[0] = k;
1566 return;
1569 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1571 if (s)
1573 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1574 algorithm, we can overwrite b_dividend and b_divisor, so we do
1575 that. */
1576 for (i = n - 1; i > 0; i--)
1577 b_divisor[i] = (b_divisor[i] << s)
1578 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1579 b_divisor[0] = b_divisor[0] << s;
1581 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1582 for (i = m - 1; i > 0; i--)
1583 b_dividend[i] = (b_dividend[i] << s)
1584 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1585 b_dividend[0] = b_dividend[0] << s;
1588 /* Main loop. */
1589 for (j = m - n; j >= 0; j--)
1591 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1592 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1593 again:
1594 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1596 qhat -= 1;
1597 rhat += b_divisor[n-1];
1598 if (rhat < b)
1599 goto again;
1602 /* Multiply and subtract. */
1603 k = 0;
1604 for (i = 0; i < n; i++)
1606 p = qhat * b_divisor[i];
1607 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1608 b_dividend[i + j] = t;
1609 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1610 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1612 t = b_dividend[j+n] - k;
1613 b_dividend[j+n] = t;
1615 b_quotient[j] = qhat;
1616 if (t < 0)
1618 b_quotient[j] -= 1;
1619 k = 0;
1620 for (i = 0; i < n; i++)
1622 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1623 b_dividend[i+j] = t;
1624 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1626 b_dividend[j+n] += k;
1629 if (s)
1630 for (i = 0; i < n; i++)
1631 b_remainder[i] = (b_dividend[i] >> s)
1632 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1633 else
1634 for (i = 0; i < n; i++)
1635 b_remainder[i] = b_dividend[i];
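/* A note on the shift by S above (illustrative only).  It is Knuth's
   Algorithm D normalization step (TAOCP 4.3.1): scaling both operands
   so the divisor's top half-word has its high bit set bounds the digit
   estimate QHAT at no more than 2 over the true digit, which the
   "again" loop then repairs.  Shifting the remainder right by S at the
   end undoes the scaling; the quotient needs no adjustment.  */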
1639 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1640 the result. If QUOTIENT is nonnull, store the value of the quotient
1641 there and return the number of blocks in it. The return value is
1642 not defined otherwise. If REMAINDER is nonnull, store the value
1643 of the remainder there and store the number of blocks in
1644 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1645 the division overflowed. */
1646 unsigned int
1647 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1648 HOST_WIDE_INT *remainder,
1649 const HOST_WIDE_INT *dividend_val,
1650 unsigned int dividend_len, unsigned int dividend_prec,
1651 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1652 unsigned int divisor_prec, signop sgn,
1653 bool *oflow)
1655 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1656 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1657 unsigned HOST_HALF_WIDE_INT
1658 b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1659 unsigned HOST_HALF_WIDE_INT
1660 b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1661 unsigned HOST_HALF_WIDE_INT
1662 b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1663 unsigned HOST_HALF_WIDE_INT
1664 b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1665 unsigned int m, n;
1666 bool dividend_neg = false;
1667 bool divisor_neg = false;
1668 bool overflow = false;
1669 wide_int neg_dividend, neg_divisor;
1671 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1672 dividend_prec);
1673 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1674 divisor_prec);
1675 if (divisor == 0)
1676 overflow = true;
1678 /* The smallest signed number / -1 causes overflow. The dividend_len
1679 check is for speed rather than correctness. */
1680 if (sgn == SIGNED
1681 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1682 && divisor == -1
1683 && wi::only_sign_bit_p (dividend))
1684 overflow = true;
1686 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1687 (signed min / -1) has the same representation as the original dividend.
1688 We have traditionally made division by zero act as division by one,
1689 so there too we use the original dividend. */
1690 if (overflow)
1692 if (remainder)
1694 *remainder_len = 1;
1695 remainder[0] = 0;
1697 if (oflow != 0)
1698 *oflow = true;
1699 if (quotient)
1700 for (unsigned int i = 0; i < dividend_len; ++i)
1701 quotient[i] = dividend_val[i];
1702 return dividend_len;
1705 if (oflow)
1706 *oflow = false;
1708 /* Do it on the host if you can. */
1709 if (sgn == SIGNED
1710 && wi::fits_shwi_p (dividend)
1711 && wi::fits_shwi_p (divisor))
1713 HOST_WIDE_INT o0 = dividend.to_shwi ();
1714 HOST_WIDE_INT o1 = divisor.to_shwi ();
1716 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1718 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1719 if (quotient)
1721 quotient[0] = HOST_WIDE_INT_MIN;
1722 quotient[1] = 0;
1724 if (remainder)
1726 remainder[0] = 0;
1727 *remainder_len = 1;
1729 return 2;
1731 else
1733 if (quotient)
1734 quotient[0] = o0 / o1;
1735 if (remainder)
1737 remainder[0] = o0 % o1;
1738 *remainder_len = 1;
1740 return 1;
1744 if (sgn == UNSIGNED
1745 && wi::fits_uhwi_p (dividend)
1746 && wi::fits_uhwi_p (divisor))
1748 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1749 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1751 if (quotient)
1752 quotient[0] = o0 / o1;
1753 if (remainder)
1755 remainder[0] = o0 % o1;
1756 *remainder_len = 1;
1758 return 1;
1761 /* Make the divisor and dividend positive and remember what we
1762 did. */
1763 if (sgn == SIGNED)
1765 if (wi::neg_p (dividend))
1767 neg_dividend = -dividend;
1768 dividend = neg_dividend;
1769 dividend_neg = true;
1771 if (wi::neg_p (divisor))
1773 neg_divisor = -divisor;
1774 divisor = neg_divisor;
1775 divisor_neg = true;
1779 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1780 dividend_blocks_needed, dividend_prec, sgn);
1781 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1782 divisor_blocks_needed, divisor_prec, sgn);
1784 m = dividend_blocks_needed;
1785 while (m > 1 && b_dividend[m - 1] == 0)
1786 m--;
1788 n = divisor_blocks_needed;
1789 while (n > 1 && b_divisor[n - 1] == 0)
1790 n--;
1792 memset (b_quotient, 0, sizeof (b_quotient));
1794 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1796 unsigned int quotient_len = 0;
1797 if (quotient)
1799 wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
1800 quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
1801 /* The quotient is neg if exactly one of the divisor or dividend is
1802 neg. */
1803 if (dividend_neg != divisor_neg)
1804 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1805 quotient_len, dividend_prec,
1806 UNSIGNED, 0);
1809 if (remainder)
1811 wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
1812 *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
1813 /* The remainder is always the same sign as the dividend. */
1814 if (dividend_neg)
1815 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1816 *remainder_len, dividend_prec,
1817 UNSIGNED, 0);
1820 return quotient_len;
1824 * Shifting, rotating and extraction.
1827 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1828 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1829 unsigned int
1830 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1831 unsigned int xlen, unsigned int precision,
1832 unsigned int shift)
1834 /* Split the shift into a whole-block shift and a subblock shift. */
1835 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1836 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1838 /* The whole-block shift fills with zeros. */
1839 unsigned int len = BLOCKS_NEEDED (precision);
1840 for (unsigned int i = 0; i < skip; ++i)
1841 val[i] = 0;
1843 /* It's easier to handle the simple block case specially. */
1844 if (small_shift == 0)
1845 for (unsigned int i = skip; i < len; ++i)
1846 val[i] = safe_uhwi (xval, xlen, i - skip);
1847 else
1849 /* The first unfilled output block is a left shift of the first
1850 block in XVAL. The other output blocks contain bits from two
1851 consecutive input blocks. */
1852 unsigned HOST_WIDE_INT carry = 0;
1853 for (unsigned int i = skip; i < len; ++i)
1855 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1856 val[i] = (x << small_shift) | carry;
1857 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1860 return canonize (val, len, precision);
1863 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1864 number of blocks in VAL. The input has XPRECISION bits and the
1865 output has XPRECISION - SHIFT bits. */
1866 static unsigned int
1867 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1868 unsigned int xlen, unsigned int xprecision,
1869 unsigned int shift)
1871 /* Split the shift into a whole-block shift and a subblock shift. */
1872 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1873 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1875 /* Work out how many blocks are needed to store the significant bits
1876 (excluding the upper zeros or signs). */
1877 unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1879 /* It's easier to handle the simple block case specially. */
1880 if (small_shift == 0)
1881 for (unsigned int i = 0; i < len; ++i)
1882 val[i] = safe_uhwi (xval, xlen, i + skip);
1883 else
1885 /* Each output block but the last is a combination of two input blocks.
1886 The last block is a right shift of the last block in XVAL. */
1887 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1888 for (unsigned int i = 0; i < len; ++i)
1890 val[i] = curr >> small_shift;
1891 curr = safe_uhwi (xval, xlen, i + skip + 1);
1892 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1895 return len;
1898 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1899 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1900 VAL has PRECISION bits. */
1901 unsigned int
1902 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1903 unsigned int xlen, unsigned int xprecision,
1904 unsigned int precision, unsigned int shift)
1906 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1908 /* The value we just created has precision XPRECISION - SHIFT.
1909 Zero-extend it to wider precisions. */
1910 if (precision > xprecision - shift)
1912 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1913 if (small_prec)
1914 val[len - 1] = zext_hwi (val[len - 1], small_prec);
1915 else if (val[len - 1] < 0)
1917 /* Add a new block with a zero. */
1918 val[len++] = 0;
1919 return len;
1922 return canonize (val, len, precision);
1925 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1926 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1927 VAL has PRECISION bits. */
1928 unsigned int
1929 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1930 unsigned int xlen, unsigned int xprecision,
1931 unsigned int precision, unsigned int shift)
1933 unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1935 /* The value we just created has precision XPRECISION - SHIFT.
1936 Sign-extend it to wider precisions. */
1937 if (precision > xprecision - shift)
1939 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1940 if (small_prec)
1941 val[len - 1] = sext_hwi (val[len - 1], small_prec);
1943 return canonize (val, len, precision);
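/* Worked shift examples at precision 8 (illustrative only), starting
   from the canonical value 0x80 (stored as the single block -0x80):

     wi::lshift_large  (..., shift 1)	0x00 (the set bit shifts out)
     wi::lrshift_large (..., shift 1)	0x40 (zeros enter at the top)
     wi::arshift_large (..., shift 1)	0xc0 (sign copies enter)  */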
1946 /* Return the number of leading (upper) zeros in X. */
1947 int
1948 wi::clz (const wide_int_ref &x)
1950 /* Calculate how many bits there are above the highest represented block. */
1951 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1953 unsigned HOST_WIDE_INT high = x.uhigh ();
1954 if (count < 0)
1955 /* The upper -COUNT bits of HIGH are not part of the value.
1956 Clear them out. */
1957 high = (high << -count) >> -count;
1958 else if (x.sign_mask () < 0)
1959 /* The upper bit is set, so there are no leading zeros. */
1960 return 0;
1962 /* We don't need to look below HIGH. Either HIGH is nonzero,
1963 or the top bit of the block below is nonzero; clz_hwi is
1964 HOST_BITS_PER_WIDE_INT in the latter case. */
1965 return count + clz_hwi (high);
1968 /* Return the number of redundant sign bits in X. (That is, the number
1969 of bits immediately below the sign bit that have the same value as
1970 the sign bit.) */
1971 int
1972 wi::clrsb (const wide_int_ref &x)
1974 /* Calculate how many bits there are above the highest represented block. */
1975 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1977 unsigned HOST_WIDE_INT high = x.uhigh ();
1978 unsigned HOST_WIDE_INT mask = -1;
1979 if (count < 0)
1981 /* The upper -COUNT bits of HIGH are not part of the value.
1982 Clear them from both MASK and HIGH. */
1983 mask >>= -count;
1984 high &= mask;
1987 /* If the top bit is 1, count the number of leading 1s. If the top
1988 bit is zero, count the number of leading zeros. */
1989 if (high > mask / 2)
1990 high ^= mask;
1992 /* There are no sign bits below the top block, so we don't need to look
1993 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
1994 HIGH is 0. */
1995 return count + clz_hwi (high) - 1;
1998 /* Return the number of trailing (lower) zeros in X. */
1999 int
2000 wi::ctz (const wide_int_ref &x)
2002 if (x.len == 1 && x.ulow () == 0)
2003 return x.precision;
2005 /* Having dealt with the zero case, there must be a block with a
2006 nonzero bit. We don't care about the bits above the first 1. */
2007 unsigned int i = 0;
2008 while (x.val[i] == 0)
2009 ++i;
2010 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2013 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2014 return -1. */
2015 int
2016 wi::exact_log2 (const wide_int_ref &x)
2018 /* Reject cases where there are implicit -1 blocks above HIGH. */
2019 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2020 return -1;
2022 /* Set CRUX to the index of the entry that should be nonzero.
2023 If the top block is zero then the next lowest block (if any)
2024 must have the high bit set. */
2025 unsigned int crux = x.len - 1;
2026 if (crux > 0 && x.val[crux] == 0)
2027 crux -= 1;
2029 /* Check that all lower blocks are zero. */
2030 for (unsigned int i = 0; i < crux; ++i)
2031 if (x.val[i] != 0)
2032 return -1;
2034 /* Get a zero-extended form of block CRUX. */
2035 unsigned HOST_WIDE_INT hwi = x.val[crux];
2036 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2037 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2039 /* Now it's down to whether HWI is a power of 2. */
2040 int res = ::exact_log2 (hwi);
2041 if (res >= 0)
2042 res += crux * HOST_BITS_PER_WIDE_INT;
2043 return res;
2046 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2047 int
2048 wi::floor_log2 (const wide_int_ref &x)
2050 return x.precision - 1 - clz (x);
2053 /* Return the index of the first (lowest) set bit in X, counting from 1.
2054 Return 0 if X is 0. */
2055 int
2056 wi::ffs (const wide_int_ref &x)
2058 return eq_p (x, 0) ? 0 : ctz (x) + 1;
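/* Worked examples for the bit-scan routines above (illustrative only),
   using the 32-bit value 0x100:

     clz == 23    ctz == 8    popcount == 1
     exact_log2 == floor_log2 == 8    ffs == 9
     clrsb == 22 (one less than the 23 leading sign-bit copies)  */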
2061 /* Return true if sign-extending X to have precision PRECISION would give
2062 the minimum signed value at that precision. */
2063 bool
2064 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2066 return ctz (x) + 1 == int (precision);
2069 /* Return true if X represents the minimum signed value. */
2070 bool
2071 wi::only_sign_bit_p (const wide_int_ref &x)
2073 return only_sign_bit_p (x, x.precision);
2077 * Private utilities.
2080 void gt_ggc_mx (widest_int *) { }
2081 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2082 void gt_pch_nx (widest_int *) { }
2084 template void wide_int::dump () const;
2085 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2086 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2087 template void offset_int::dump () const;
2088 template void widest_int::dump () const;