/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "inchash.h"
#include "tree.h"
#include "dumpfile.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT

/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   depending on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block i does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
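/* Usage sketch (illustrative only; the decoding depends on the target's
   BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN settings).  On a target where
   both are false, the four bytes { 0x78, 0x56, 0x34, 0x12 } decode to
   the 32-bit value 0x12345678:

     static const unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
     wide_int w = wi::from_buffer (buf, 4);
     // w has precision 32 and, under the above assumptions,
     // wi::eq_p (w, 0x12345678) holds.  */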
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the -0x80..0 edge case that
         negation won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof(HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
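/* Round-trip sketch (illustrative only): converting out to GMP and
   back should preserve the value.  TYPE here is a placeholder for any
   integral tree whose TYPE_PRECISION matches X's precision:

     mpz_t m;
     mpz_init (m);
     wi::to_mpz (x, m, SIGNED);
     wide_int y = wi::from_mpz (type, m, true);  // y == x
     mpz_clear (m);  */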
/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
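/* Worked example (illustrative, 8-bit precision):

     wi::max_value (8, UNSIGNED)  == 255   (all ones)
     wi::max_value (8, SIGNED)    == 127   (0x7f, top bit clear)
     wi::min_value (8, UNSIGNED)  == 0
     wi::min_value (8, SIGNED)    == -128  (0x80, only the top bit set)  */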
/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}
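/* Extension sketch (illustrative, assuming a 64-bit HOST_WIDE_INT):
   widening the 8-bit pattern 0xff (stored canonically as { -1 }) to
   32 bits depends on SGN, the signedness of the input:

     SIGNED:   the value is -1, so the result stays { -1 };
     UNSIGNED: the low 8 bits are zero-extended, giving 255.  */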
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}
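/* Illustrative reading (64-bit blocks): for a 70-bit all-ones value
   stored as { -1 } (len 1), block 1 holds only 6 significant bits, so
   selt (..., index = 1, ..., UNSIGNED) returns 0x3f while
   selt (..., index = 1, ..., SIGNED) returns -1.  */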
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}
/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
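/* Worked example (illustrative, via the wide_int-returning overload in
   wide-int.h, at 8-bit precision):

     wi::mask (4, false, 8)  yields 0x0f  (the low four bits set);
     wi::mask (4, true, 8)   yields 0xf0  (the inversion within
                                           precision 8).  */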
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
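/* Worked example (illustrative, 8-bit precision):

     wi::shifted_mask (2, 3, false, 8)  yields 0x1c  (binary 00011100);
     wi::shifted_mask (2, 3, true, 8)   yields 0xe3  (binary 11100011).  */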
/*
 * logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
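/* Illustrative case (64-bit blocks, precision 128): ANDing { 5, 9 }
   (len 2) with { -1 } (len 1, implicitly all ones above) leaves
   { 5, 9 } unchanged, while ANDing with { 1 } (len 1, implicitly zero
   above) shortens the result to { 5 & 1 } = { 1 }.  */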
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/*
 * math
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
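/* Overflow sketch (illustrative, 8-bit precision): adding 127 + 1 as
   SIGNED produces the bit pattern 0x80 and sets *OVERFLOW, because the
   result's sign differs from the signs of both operands; the same
   addition as UNSIGNED is 128 and does not overflow.  For instance,
   via the public API:

     bool ovf;
     wide_int a = wi::shwi (127, 8);
     wide_int b = wi::shwi (1, 8);
     wide_int c = wi::add (a, b, SIGNED, &ovf);   // ovf == true  */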
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
        | ((unsigned HOST_WIDE_INT)input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
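/* Illustrative round trip (assuming a 64-bit HOST_WIDE_INT and 32-bit
   half blocks): wi_unpack splits { 0x1122334455667788 } into the half
   blocks { 0x55667788, 0x11223344 }, and wi_pack recombines them into
   the original block.  */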
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          if (high)
            val[0] = upper;
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}
/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT, which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
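/* Worked single-digit example (illustrative, 32-bit half blocks, so
   b = 2^32): dividing 2^32 (digits { 0, 1 }, m = 2) by 2 (n = 1) walks
   the digits from the top.  j = 1 gives quotient digit 0 with running
   remainder k = 1; j = 0 then computes (1 * b + 0) / 2 = 2^31.  The
   quotient digits are therefore { 0x80000000, 0 }, i.e. 2^31, with
   remainder 0.  */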
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
        quotient[0] = o0 / o1;
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = 1;
        }
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
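/* Sign-handling sketch (illustrative): with truncating division,
   -7 / 2 as SIGNED gives quotient -3 and remainder -1 (the remainder
   follows the dividend's sign), e.g. via the public API:

     wide_int a = wi::shwi (-7, 32);
     wide_int b = wi::shwi (2, 32);
     wide_int q = wi::div_trunc (a, b, SIGNED);   // -3
     wide_int r = wi::mod_trunc (a, b, SIGNED);   // -1  */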
/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
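/* Shift decomposition sketch (illustrative, 64-bit blocks): a left
   shift by 70 is a whole-block shift by one (skip = 1) plus a subblock
   shift by 6 (small_shift = 6); block 0 of the result is zero and each
   higher block combines bits from two adjacent input blocks.  */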
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero.  */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
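/* Worked example (illustrative, 8-bit precision): for -32
   (binary 11100000), the two bits directly below the sign bit repeat
   it, so wi::clrsb returns 2; for 1 (binary 00000001) it returns 6.  */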
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
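/* Worked example (illustrative, 64-bit blocks): 2^70 at precision 128
   is stored as { 0, 1 << 6 }; block 0 is zero, block 1 (the CRUX) is a
   power of 2, so wi::exact_log2 returns 64 + 6 = 70.  */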
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;