1 /* Operations with very long integers.
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "selftest.h"
29 #define HOST_BITS_PER_HALF_WIDE_INT 32
30 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31 # define HOST_HALF_WIDE_INT long
32 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33 # define HOST_HALF_WIDE_INT int
34 #else
35 #error Please add support for HOST_HALF_WIDE_INT
36 #endif
38 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42 typedef unsigned HOST_WIDE_INT UWtype;
43 typedef unsigned int UQItype __attribute__ ((mode (QI)));
44 typedef unsigned int USItype __attribute__ ((mode (SI)));
45 typedef unsigned int UDItype __attribute__ ((mode (DI)));
46 #if W_TYPE_SIZE == 32
47 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48 #else
49 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50 #endif
51 #include "longlong.h"
52 #endif
54 static const HOST_WIDE_INT zeros[1] = {};
57 * Internal utilities.
60 /* Quantities to deal with values that hold half of a wide int. Used
61 in multiply and divide. */
62 #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
64 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 #define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
66 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
68 /* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
69 based on the top existing bit of VAL. */
71 static unsigned HOST_WIDE_INT
72 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
77 /* Convert the integer in VAL to canonical form, returning its new length.
78 LEN is the number of blocks currently in VAL and PRECISION is the number
79 of bits in the integer it represents.
81 This function only changes the representation, not the value. */
82 static unsigned int
83 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 HOST_WIDE_INT top;
87 int i;
89 if (len > blocks_needed)
90 len = blocks_needed;
92 if (len == 1)
93 return len;
95 top = val[len - 1];
96 if (len * HOST_BITS_PER_WIDE_INT > precision)
97 val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 if (top != 0 && top != HOST_WIDE_INT_M1)
99 return len;
101 /* At this point we know that the top is either 0 or -1. Find the
102 first block that is not a copy of this. */
103 for (i = len - 2; i >= 0; i--)
105 HOST_WIDE_INT x = val[i];
106 if (x != top)
108 if (SIGN_MASK (x) == top)
109 return i + 1;
111 /* We need an extra block because the top bit of block I does
112 not match the extension. */
113 return i + 2;
117 /* The number is 0 or -1. */
118 return 1;
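/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: canonize drops blocks that merely repeat the sign of the
   block below them.  Assumes a 64-bit HOST_WIDE_INT; the helper name is
   hypothetical.  */
static void
example_canonize_sketch ()
{
  /* -1 in 128-bit precision, stored redundantly in two all-ones blocks.  */
  HOST_WIDE_INT val[2] = { -1, -1 };
  unsigned int len = canonize (val, 2, 128);
  /* The upper block is a pure sign copy of the lower one, so the
     canonical length is 1.  */
  gcc_checking_assert (len == 1 && val[0] == -1);
}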
121 /* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 another 0 block if needed, and return the number of blocks needed. */
124 static inline unsigned int
125 canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129 val[1] = 0;
130 return 2;
132 return 1;
136 * Conversion routines in and out of wide_int.
139 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 result for an integer with precision PRECISION. Return the length
141 of VAL (after any canonization). */
142 unsigned int
143 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 unsigned int xlen, unsigned int precision, bool need_canon)
146 for (unsigned i = 0; i < xlen; i++)
147 val[i] = xval[i];
148 return need_canon ? canonize (val, xlen, precision) : xlen;
151 /* Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be
152 read according to byte endianness and word endianness of the target.
153 Only the lower BUFFER_LEN bytes of the result are set; the remaining
154 high bytes are cleared. */
155 wide_int
156 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
158 unsigned int precision = buffer_len * BITS_PER_UNIT;
159 wide_int result = wide_int::create (precision);
160 unsigned int words = buffer_len / UNITS_PER_WORD;
162 /* We have to clear all the bits ourselves, as we merely OR in values
163 below. */
164 unsigned int len = BLOCKS_NEEDED (precision);
165 HOST_WIDE_INT *val = result.write_val (0);
166 for (unsigned int i = 0; i < len; ++i)
167 val[i] = 0;
169 for (unsigned int byte = 0; byte < buffer_len; byte++)
171 unsigned int offset;
172 unsigned int index;
173 unsigned int bitpos = byte * BITS_PER_UNIT;
174 unsigned HOST_WIDE_INT value;
176 if (buffer_len > UNITS_PER_WORD)
178 unsigned int word = byte / UNITS_PER_WORD;
180 if (WORDS_BIG_ENDIAN)
181 word = (words - 1) - word;
183 offset = word * UNITS_PER_WORD;
185 if (BYTES_BIG_ENDIAN)
186 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
187 else
188 offset += byte % UNITS_PER_WORD;
190 else
191 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
193 value = (unsigned HOST_WIDE_INT) buffer[offset];
195 index = bitpos / HOST_BITS_PER_WIDE_INT;
196 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
199 result.set_len (canonize (val, len, precision));
201 return result;
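/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: wi::from_buffer gives the result one bit of precision per
   buffer bit.  Assumes BITS_PER_UNIT is 8; the resulting value depends on
   the target's byte and word order, so only the precision is checked here.
   The helper name is hypothetical.  */
static void
example_from_buffer_sketch ()
{
  const unsigned char bytes[2] = { 0x01, 0x02 };
  wide_int x = wi::from_buffer (bytes, 2);
  /* Two bytes of input produce a 16-bit wide_int.  */
  gcc_checking_assert (x.get_precision () == 16);
}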
204 /* Sets RESULT from X; the sign is taken according to SGN. */
205 void
206 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
208 int len = x.get_len ();
209 const HOST_WIDE_INT *v = x.get_val ();
210 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
212 if (wi::neg_p (x, sgn))
214 /* We use one's complement to avoid the -0x80..0 edge case that unary
215 negation won't work on. */
216 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
217 for (int i = 0; i < len; i++)
218 t[i] = ~v[i];
219 if (excess > 0)
220 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
222 mpz_com (result, result);
224 else if (excess > 0)
226 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
227 for (int i = 0; i < len - 1; i++)
228 t[i] = v[i];
229 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
230 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232 else if (excess < 0 && wi::neg_p (x))
234 int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
235 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
236 for (int i = 0; i < len; i++)
237 t[i] = v[i];
238 for (int i = 0; i < extra; i++)
239 t[len + i] = -1;
240 excess = (-excess) % HOST_BITS_PER_WIDE_INT;
241 if (excess)
242 t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
243 mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
245 else
246 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
249 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
250 values of X will be wrapped; otherwise, they will be set to the
251 appropriate minimum or maximum TYPE bound. */
252 wide_int
253 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
255 size_t count, numb;
256 unsigned int prec = TYPE_PRECISION (type);
257 wide_int res = wide_int::create (prec);
259 if (!wrap)
261 mpz_t min, max;
263 mpz_init (min);
264 mpz_init (max);
265 get_type_static_bounds (type, min, max);
267 if (mpz_cmp (x, min) < 0)
268 mpz_set (x, min);
269 else if (mpz_cmp (x, max) > 0)
270 mpz_set (x, max);
272 mpz_clear (min);
273 mpz_clear (max);
276 /* Determine the number of unsigned HOST_WIDE_INTs that are required
277 for representing the absolute value. The code to calculate count is
278 extracted from the GMP manual, section "Integer Import and Export":
279 http://gmplib.org/manual/Integer-Import-and-Export.html */
280 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
281 count = CEIL (mpz_sizeinbase (x, 2), numb);
282 HOST_WIDE_INT *val = res.write_val (0);
283 /* Read the absolute value.
285 Write directly to the wide_int storage if possible, otherwise leave
286 GMP to allocate the memory for us. It might be slightly more efficient
287 to use mpz_tdiv_r_2exp for the latter case, but the situation is
288 pathological and it seems safer to operate on the original mpz value
289 in all cases. */
290 void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
291 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
292 if (count < 1)
294 val[0] = 0;
295 count = 1;
297 count = MIN (count, BLOCKS_NEEDED (prec));
298 if (valres != val)
300 memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
301 free (valres);
303 /* Zero-extend the absolute value to PREC bits. */
304 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
305 val[count++] = 0;
306 else
307 count = canonize (val, count, prec);
308 res.set_len (count);
310 if (mpz_sgn (x) < 0)
311 res = -res;
313 return res;
317 * Largest and smallest values in a mode.
320 /* Return the largest SGNed number that is representable in PRECISION bits.
322 TODO: There is still code from the double_int era that tries to
323 make up for the fact that double_ints could not represent the
324 min and max values of all types. This code should be removed
325 because the min and max values can always be represented in
326 wide_ints and int-csts. */
327 wide_int
328 wi::max_value (unsigned int precision, signop sgn)
330 gcc_checking_assert (precision != 0);
331 if (sgn == UNSIGNED)
332 /* The unsigned max is just all ones. */
333 return shwi (-1, precision);
334 else
335 /* The signed max is all ones except the top bit. This must be
336 explicitly represented. */
337 return mask (precision - 1, false, precision);
340 /* Return the smallest SGNed number that is representable in PRECISION bits. */
341 wide_int
342 wi::min_value (unsigned int precision, signop sgn)
344 gcc_checking_assert (precision != 0);
345 if (sgn == UNSIGNED)
346 return uhwi (0, precision);
347 else
348 /* The signed min is all zeros except the top bit. This must be
349 explicitly represented. */
350 return wi::set_bit_in_zero (precision - 1, precision);
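/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: the extreme values of an 8-bit type as produced by
   wi::max_value and wi::min_value.  The helper name is hypothetical.  */
static void
example_min_max_sketch ()
{
  wide_int umax = wi::max_value (8, UNSIGNED);
  wide_int smax = wi::max_value (8, SIGNED);
  wide_int smin = wi::min_value (8, SIGNED);
  gcc_checking_assert (umax.to_uhwi () == 255);
  gcc_checking_assert (smax.to_shwi () == 127);
  gcc_checking_assert (smin.to_shwi () == -128);
}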
354 * Public utilities.
357 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
358 signedness SGN, to an integer that has PRECISION bits. Store the blocks
359 in VAL and return the number of blocks used.
361 This function can handle both extension (PRECISION > XPRECISION)
362 and truncation (PRECISION < XPRECISION). */
363 unsigned int
364 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
365 unsigned int xlen, unsigned int xprecision,
366 unsigned int precision, signop sgn)
368 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
369 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
370 for (unsigned i = 0; i < len; i++)
371 val[i] = xval[i];
373 if (precision > xprecision)
375 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
377 /* Expanding. */
378 if (sgn == UNSIGNED)
380 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
381 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
382 else if (val[len - 1] < 0)
384 while (len < BLOCKS_NEEDED (xprecision))
385 val[len++] = -1;
386 if (small_xprecision)
387 val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
388 else
389 val[len++] = 0;
392 else
394 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
395 val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
398 len = canonize (val, len, precision);
400 return len;
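/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: extending the 8-bit value 0xff to 16 bits gives -1 under
   SIGNED extension and 255 under UNSIGNED extension.  Assumes a 64-bit
   HOST_WIDE_INT; the helper name is hypothetical.  */
static void
example_force_to_size_sketch ()
{
  HOST_WIDE_INT xval[1] = { -1 };	/* 0xff in canonical 8-bit form.  */
  HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
  unsigned int len = wi::force_to_size (val, xval, 1, 8, 16, SIGNED);
  gcc_checking_assert (len == 1 && val[0] == -1);
  len = wi::force_to_size (val, xval, 1, 8, 16, UNSIGNED);
  gcc_checking_assert (len == 1 && val[0] == 0xff);
}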
403 /* This function hides the fact that we cannot rely on the bits beyond
404 the precision. This issue comes up in the relational comparisons
405 where we do allow comparisons of values of different precisions. */
406 static inline HOST_WIDE_INT
407 selt (const HOST_WIDE_INT *a, unsigned int len,
408 unsigned int blocks_needed, unsigned int small_prec,
409 unsigned int index, signop sgn)
411 HOST_WIDE_INT val;
412 if (index < len)
413 val = a[index];
414 else if (index < blocks_needed || sgn == SIGNED)
415 /* Signed or within the precision. */
416 val = SIGN_MASK (a[len - 1]);
417 else
418 /* Unsigned extension beyond the precision. */
419 val = 0;
421 if (small_prec && index == blocks_needed - 1)
422 return (sgn == SIGNED
423 ? sext_hwi (val, small_prec)
424 : zext_hwi (val, small_prec));
425 else
426 return val;
429 /* Find the highest bit represented in a wide int. This will in
430 general have the same value as the sign bit. */
431 static inline HOST_WIDE_INT
432 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
434 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
435 unsigned HOST_WIDE_INT val = a[len - 1];
436 if (excess > 0)
437 val <<= excess;
438 return val >> (HOST_BITS_PER_WIDE_INT - 1);
442 * Comparisons; note that only equality is an operator. The other
443 * comparisons cannot be operators since they are inherently signed or
444 * unsigned and C++ has no such operators.
447 /* Return true if OP0 == OP1. */
448 bool
449 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
450 const HOST_WIDE_INT *op1, unsigned int op1len,
451 unsigned int prec)
453 int l0 = op0len - 1;
454 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
456 if (op0len != op1len)
457 return false;
459 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
461 /* It does not matter if we zext or sext here, we just have to
462 do both the same way. */
463 if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
464 return false;
465 l0--;
468 while (l0 >= 0)
469 if (op0[l0] != op1[l0])
470 return false;
471 else
472 l0--;
474 return true;
477 /* Return true if OP0 < OP1 using signed comparisons. */
478 bool
479 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
480 unsigned int precision,
481 const HOST_WIDE_INT *op1, unsigned int op1len)
483 HOST_WIDE_INT s0, s1;
484 unsigned HOST_WIDE_INT u0, u1;
485 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
486 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
487 int l = MAX (op0len - 1, op1len - 1);
489 /* Only the top block is compared as signed. The rest are unsigned
490 comparisons. */
491 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
492 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
493 if (s0 < s1)
494 return true;
495 if (s0 > s1)
496 return false;
498 l--;
499 while (l >= 0)
501 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
502 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
504 if (u0 < u1)
505 return true;
506 if (u0 > u1)
507 return false;
508 l--;
511 return false;
514 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
515 signed compares. */
517 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
518 unsigned int precision,
519 const HOST_WIDE_INT *op1, unsigned int op1len)
521 HOST_WIDE_INT s0, s1;
522 unsigned HOST_WIDE_INT u0, u1;
523 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
524 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
525 int l = MAX (op0len - 1, op1len - 1);
527 /* Only the top block is compared as signed. The rest are unsigned
528 comparisons. */
529 s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
530 s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
531 if (s0 < s1)
532 return -1;
533 if (s0 > s1)
534 return 1;
536 l--;
537 while (l >= 0)
539 u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
540 u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
542 if (u0 < u1)
543 return -1;
544 if (u0 > u1)
545 return 1;
546 l--;
549 return 0;
552 /* Return true if OP0 < OP1 using unsigned comparisons. */
553 bool
554 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
555 unsigned int precision,
556 const HOST_WIDE_INT *op1, unsigned int op1len)
558 unsigned HOST_WIDE_INT x0;
559 unsigned HOST_WIDE_INT x1;
560 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
561 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
562 int l = MAX (op0len - 1, op1len - 1);
564 while (l >= 0)
566 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
567 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
568 if (x0 < x1)
569 return true;
570 if (x0 > x1)
571 return false;
572 l--;
575 return false;
578 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
579 unsigned compares. */
581 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
582 unsigned int precision,
583 const HOST_WIDE_INT *op1, unsigned int op1len)
585 unsigned HOST_WIDE_INT x0;
586 unsigned HOST_WIDE_INT x1;
587 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
588 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
589 int l = MAX (op0len - 1, op1len - 1);
591 while (l >= 0)
593 x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
594 x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
595 if (x0 < x1)
596 return -1;
597 if (x0 > x1)
598 return 1;
599 l--;
602 return 0;
606 * Extension.
609 /* Sign-extend the number represented by XVAL and XLEN into VAL,
610 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
611 and VAL have PRECISION bits. */
612 unsigned int
613 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
614 unsigned int xlen, unsigned int precision, unsigned int offset)
616 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
617 /* Extending beyond the precision is a no-op. If we have only stored
618 OFFSET bits or fewer, the rest are already signs. */
619 if (offset >= precision || len >= xlen)
621 for (unsigned i = 0; i < xlen; ++i)
622 val[i] = xval[i];
623 return xlen;
625 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 for (unsigned int i = 0; i < len; i++)
627 val[i] = xval[i];
628 if (suboffset > 0)
630 val[len] = sext_hwi (xval[len], suboffset);
631 len += 1;
633 return canonize (val, len, precision);
636 /* Zero-extend the number represented by XVAL and XLEN into VAL,
637 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
638 and VAL have PRECISION bits. */
639 unsigned int
640 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
641 unsigned int xlen, unsigned int precision, unsigned int offset)
643 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
644 /* Extending beyond the precision is a no-op. If we have only stored
645 OFFSET bits or fewer, and the upper stored bit is zero, then there
646 is nothing to do. */
647 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
649 for (unsigned i = 0; i < xlen; ++i)
650 val[i] = xval[i];
651 return xlen;
653 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
654 for (unsigned int i = 0; i < len; i++)
655 val[i] = i < xlen ? xval[i] : -1;
656 if (suboffset > 0)
657 val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
658 else
659 val[len] = 0;
660 return canonize (val, len + 1, precision);
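/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: zero-extending from bit 8 clears everything above the low
   byte.  Assumes a 64-bit HOST_WIDE_INT; the helper name is
   hypothetical.  */
static void
example_zext_sketch ()
{
  HOST_WIDE_INT xval[1] = { -1 };	/* All ones in 64-bit precision.  */
  HOST_WIDE_INT val[2];
  unsigned int len = wi::zext_large (val, xval, 1, 64, 8);
  gcc_checking_assert (len == 1 && val[0] == 0xff);
}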
664 * Masking, inserting, shifting, rotating.
667 /* Insert WIDTH bits from Y into X starting at START. */
668 wide_int
669 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
670 unsigned int width)
672 wide_int result;
673 wide_int mask;
674 wide_int tmp;
676 unsigned int precision = x.get_precision ();
677 if (start >= precision)
678 return x;
680 gcc_checking_assert (precision >= width);
682 if (start + width >= precision)
683 width = precision - start;
685 mask = wi::shifted_mask (start, width, false, precision);
686 tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
687 result = tmp & mask;
689 tmp = wi::bit_and_not (x, mask);
690 result = result | tmp;
692 return result;
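/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: inserting the 4-bit value 0xa at bit 4 of an 8-bit zero
   yields 0xa0.  The helper name is hypothetical.  */
static void
example_insert_sketch ()
{
  wide_int x = wi::uhwi (0, 8);
  wide_int y = wi::uhwi (0xa, 8);
  wide_int r = wi::insert (x, y, 4, 4);
  gcc_checking_assert (r.to_uhwi () == 0xa0);
}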
695 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
696 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
697 bits. */
698 unsigned int
699 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
700 unsigned int xlen, unsigned int precision, unsigned int bit)
702 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
703 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
705 if (block + 1 >= xlen)
707 /* The operation either affects the last current block or needs
708 a new block. */
709 unsigned int len = block + 1;
710 for (unsigned int i = 0; i < len; i++)
711 val[i] = safe_uhwi (xval, xlen, i);
712 val[block] |= HOST_WIDE_INT_1U << subbit;
714 /* If the bit we just set is at the msb of the block, make sure
715 that any higher bits are zeros. */
716 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
718 val[len++] = 0;
719 return len;
721 return canonize (val, len, precision);
723 else
725 for (unsigned int i = 0; i < xlen; i++)
726 val[i] = xval[i];
727 val[block] |= HOST_WIDE_INT_1U << subbit;
728 return canonize (val, xlen, precision);
732 /* Byte swap the integer represented by XVAL and LEN into VAL. Return
733 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
734 unsigned int
735 wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
736 unsigned int len, unsigned int precision)
738 unsigned int i, s;
740 /* This is not a well defined operation if the precision is not a
741 multiple of 8. */
742 gcc_assert ((precision & 0x7) == 0);
744 for (i = 0; i < len; i++)
745 val[i] = 0;
747 /* Only swap the bytes that are not the padding. */
748 for (s = 0; s < precision; s += 8)
750 unsigned int d = precision - s - 8;
751 unsigned HOST_WIDE_INT byte;
753 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
754 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
756 byte = (safe_uhwi (xval, len, block) >> offset) & 0xff;
758 block = d / HOST_BITS_PER_WIDE_INT;
759 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
761 val[block] |= byte << offset;
764 return canonize (val, len, precision);
767 /* Bitreverse the integer represented by XVAL and LEN into VAL. Return
768 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
769 unsigned int
770 wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
771 unsigned int len, unsigned int precision)
773 unsigned int i, s;
775 for (i = 0; i < len; i++)
776 val[i] = 0;
778 for (s = 0; s < precision; s++)
780 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
781 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
782 if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
784 unsigned int d = (precision - 1) - s;
785 block = d / HOST_BITS_PER_WIDE_INT;
786 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
787 val[block] |= HOST_WIDE_INT_1U << offset;
791 return canonize (val, len, precision);
794 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
795 above that up to PREC are zeros. The result is inverted if NEGATE
796 is true. Return the number of blocks in VAL. */
797 unsigned int
798 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
799 unsigned int prec)
801 if (width >= prec)
803 val[0] = negate ? 0 : -1;
804 return 1;
806 else if (width == 0)
808 val[0] = negate ? -1 : 0;
809 return 1;
812 unsigned int i = 0;
813 while (i < width / HOST_BITS_PER_WIDE_INT)
814 val[i++] = negate ? 0 : -1;
816 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
817 if (shift != 0)
819 HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
820 val[i++] = negate ? ~last : last;
822 else
823 val[i++] = negate ? -1 : 0;
825 return i;
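/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: a mask of the low 8 bits within 32 bits of precision, and
   its inverse.  Assumes a 64-bit HOST_WIDE_INT; the helper name is
   hypothetical.  */
static void
example_mask_sketch ()
{
  HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
  unsigned int len = wi::mask (val, 8, false, 32);
  gcc_checking_assert (len == 1 && val[0] == 0xff);
  len = wi::mask (val, 8, true, 32);
  /* The inverted mask is bits 8..31, whose canonical representation is
     the sign-extended value ~0xff.  */
  gcc_checking_assert (len == 1 && val[0] == (HOST_WIDE_INT) ~0xff);
}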
828 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
829 bits are ones, and the bits above that up to PREC are zeros. The result
830 is inverted if NEGATE is true. Return the number of blocks in VAL. */
831 unsigned int
832 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
833 bool negate, unsigned int prec)
835 if (start >= prec || width == 0)
837 val[0] = negate ? -1 : 0;
838 return 1;
841 if (width > prec - start)
842 width = prec - start;
843 unsigned int end = start + width;
845 unsigned int i = 0;
846 while (i < start / HOST_BITS_PER_WIDE_INT)
847 val[i++] = negate ? -1 : 0;
849 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
850 if (shift)
852 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
853 shift += width;
854 if (shift < HOST_BITS_PER_WIDE_INT)
856 /* case 000111000 */
857 block = (HOST_WIDE_INT_1U << shift) - block - 1;
858 val[i++] = negate ? ~block : block;
859 return i;
861 else
862 /* ...111000 */
863 val[i++] = negate ? block : ~block;
866 if (end >= prec)
868 if (!shift)
869 val[i++] = negate ? 0 : -1;
870 return i;
873 while (i < end / HOST_BITS_PER_WIDE_INT)
874 /* 1111111 */
875 val[i++] = negate ? 0 : -1;
877 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
878 if (shift != 0)
880 /* 000011111 */
881 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
882 val[i++] = negate ? ~block : block;
884 else
885 val[i++] = negate ? -1 : 0;
887 return i;
891 * logical operations.
894 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
895 unsigned int
896 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
897 unsigned int op0len, const HOST_WIDE_INT *op1,
898 unsigned int op1len, unsigned int prec)
900 int l0 = op0len - 1;
901 int l1 = op1len - 1;
902 bool need_canon = true;
904 unsigned int len = MAX (op0len, op1len);
905 if (l0 > l1)
907 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
908 if (op1mask == 0)
910 l0 = l1;
911 len = l1 + 1;
913 else
915 need_canon = false;
916 while (l0 > l1)
918 val[l0] = op0[l0];
919 l0--;
923 else if (l1 > l0)
925 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
926 if (op0mask == 0)
927 len = l0 + 1;
928 else
930 need_canon = false;
931 while (l1 > l0)
933 val[l1] = op1[l1];
934 l1--;
939 while (l0 >= 0)
941 val[l0] = op0[l0] & op1[l0];
942 l0--;
945 if (need_canon)
946 len = canonize (val, len, prec);
948 return len;
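/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: ANDing operands of different canonical lengths.  -1 has
   canonical length 1 even in 128-bit precision, so ANDing it with a
   two-block value simply reproduces the other operand.  Assumes a 64-bit
   HOST_WIDE_INT; the helper name is hypothetical.  */
static void
example_and_sketch ()
{
  HOST_WIDE_INT a[1] = { -1 };		/* -1, canonical length 1.  */
  HOST_WIDE_INT b[2] = { 0, 1 };	/* 2^64.  */
  HOST_WIDE_INT val[2];
  unsigned int len = wi::and_large (val, a, 1, b, 2, 128);
  gcc_checking_assert (len == 2 && val[0] == 0 && val[1] == 1);
}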
951 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
952 unsigned int
953 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
954 unsigned int op0len, const HOST_WIDE_INT *op1,
955 unsigned int op1len, unsigned int prec)
957 wide_int result;
958 int l0 = op0len - 1;
959 int l1 = op1len - 1;
960 bool need_canon = true;
962 unsigned int len = MAX (op0len, op1len);
963 if (l0 > l1)
965 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
966 if (op1mask != 0)
968 l0 = l1;
969 len = l1 + 1;
971 else
973 need_canon = false;
974 while (l0 > l1)
976 val[l0] = op0[l0];
977 l0--;
981 else if (l1 > l0)
983 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
984 if (op0mask == 0)
985 len = l0 + 1;
986 else
988 need_canon = false;
989 while (l1 > l0)
991 val[l1] = ~op1[l1];
992 l1--;
997 while (l0 >= 0)
999 val[l0] = op0[l0] & ~op1[l0];
1000 l0--;
1003 if (need_canon)
1004 len = canonize (val, len, prec);
1006 return len;
1009 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
1010 unsigned int
1011 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1012 unsigned int op0len, const HOST_WIDE_INT *op1,
1013 unsigned int op1len, unsigned int prec)
1015 wide_int result;
1016 int l0 = op0len - 1;
1017 int l1 = op1len - 1;
1018 bool need_canon = true;
1020 unsigned int len = MAX (op0len, op1len);
1021 if (l0 > l1)
1023 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1024 if (op1mask != 0)
1026 l0 = l1;
1027 len = l1 + 1;
1029 else
1031 need_canon = false;
1032 while (l0 > l1)
1034 val[l0] = op0[l0];
1035 l0--;
1039 else if (l1 > l0)
1041 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1042 if (op0mask != 0)
1043 len = l0 + 1;
1044 else
1046 need_canon = false;
1047 while (l1 > l0)
1049 val[l1] = op1[l1];
1050 l1--;
1055 while (l0 >= 0)
1057 val[l0] = op0[l0] | op1[l0];
1058 l0--;
1061 if (need_canon)
1062 len = canonize (val, len, prec);
1064 return len;
1067 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1068 unsigned int
1069 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1070 unsigned int op0len, const HOST_WIDE_INT *op1,
1071 unsigned int op1len, unsigned int prec)
1073 wide_int result;
1074 int l0 = op0len - 1;
1075 int l1 = op1len - 1;
1076 bool need_canon = true;
1078 unsigned int len = MAX (op0len, op1len);
1079 if (l0 > l1)
1081 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1082 if (op1mask == 0)
1084 l0 = l1;
1085 len = l1 + 1;
1087 else
1089 need_canon = false;
1090 while (l0 > l1)
1092 val[l0] = op0[l0];
1093 l0--;
1097 else if (l1 > l0)
1099 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1100 if (op0mask != 0)
1101 len = l0 + 1;
1102 else
1104 need_canon = false;
1105 while (l1 > l0)
1107 val[l1] = ~op1[l1];
1108 l1--;
1113 while (l0 >= 0)
1115 val[l0] = op0[l0] | ~op1[l0];
1116 l0--;
1119 if (need_canon)
1120 len = canonize (val, len, prec);
1122 return len;
1125 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1126 unsigned int
1127 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1128 unsigned int op0len, const HOST_WIDE_INT *op1,
1129 unsigned int op1len, unsigned int prec)
1131 wide_int result;
1132 int l0 = op0len - 1;
1133 int l1 = op1len - 1;
1135 unsigned int len = MAX (op0len, op1len);
1136 if (l0 > l1)
1138 HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1139 while (l0 > l1)
1141 val[l0] = op0[l0] ^ op1mask;
1142 l0--;
1146 if (l1 > l0)
1148 HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1149 while (l1 > l0)
1151 val[l1] = op0mask ^ op1[l1];
1152 l1--;
1156 while (l0 >= 0)
1158 val[l0] = op0[l0] ^ op1[l0];
1159 l0--;
1162 return canonize (val, len, prec);
1166 * math
1169 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1170 whether the result overflows when OP0 and OP1 are treated as having
1171 signedness SGN. Return the number of blocks in VAL. */
1172 unsigned int
1173 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1174 unsigned int op0len, const HOST_WIDE_INT *op1,
1175 unsigned int op1len, unsigned int prec,
1176 signop sgn, wi::overflow_type *overflow)
1178 unsigned HOST_WIDE_INT o0 = 0;
1179 unsigned HOST_WIDE_INT o1 = 0;
1180 unsigned HOST_WIDE_INT x = 0;
1181 unsigned HOST_WIDE_INT carry = 0;
1182 unsigned HOST_WIDE_INT old_carry = 0;
1183 unsigned HOST_WIDE_INT mask0, mask1;
1184 unsigned int i;
1186 unsigned int len = MAX (op0len, op1len);
1187 mask0 = -top_bit_of (op0, op0len, prec);
1188 mask1 = -top_bit_of (op1, op1len, prec);
1189 /* Add all of the explicitly defined elements. */
1191 for (i = 0; i < len; i++)
1193 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1194 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1195 x = o0 + o1 + carry;
1196 val[i] = x;
1197 old_carry = carry;
1198 carry = carry == 0 ? x < o0 : x <= o0;
1201 if (len * HOST_BITS_PER_WIDE_INT < prec)
1203 val[len] = mask0 + mask1 + carry;
1204 len++;
1205 if (overflow)
1206 *overflow
1207 = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1209 else if (overflow)
1211 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1212 if (sgn == SIGNED)
1214 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1215 if ((HOST_WIDE_INT) (x << shift) < 0)
1217 if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1218 *overflow = wi::OVF_UNDERFLOW;
1219 else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1220 *overflow = wi::OVF_OVERFLOW;
1221 else
1222 *overflow = wi::OVF_NONE;
1224 else
1225 *overflow = wi::OVF_NONE;
1227 else
1229 /* Put the MSBs of X and O0 in the top of the HWI. */
1230 x <<= shift;
1231 o0 <<= shift;
1232 if (old_carry)
1233 *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1234 else
1235 *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1239 return canonize (val, len, prec);
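/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: adding 1 to the maximum unsigned 8-bit value wraps to 0
   and reports an unsigned overflow.  The helper name is hypothetical.  */
static void
example_add_overflow_sketch ()
{
  HOST_WIDE_INT a[1] = { -1 };		/* 0xff in canonical 8-bit form.  */
  HOST_WIDE_INT b[1] = { 1 };
  HOST_WIDE_INT sum[2];
  wi::overflow_type ovf;
  unsigned int len = wi::add_large (sum, a, 1, b, 1, 8, UNSIGNED, &ovf);
  gcc_checking_assert (len == 1 && sum[0] == 0 && ovf == wi::OVF_OVERFLOW);
}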
1242 /* Subroutines of the multiplication and division operations. Unpack
1243 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1244 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1245 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1246 static void
1247 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1248 unsigned int in_len, unsigned int out_len,
1249 unsigned int prec, signop sgn)
1251 unsigned int i;
1252 unsigned int j = 0;
1253 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1254 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1255 HOST_WIDE_INT mask;
1257 if (sgn == SIGNED)
1259 mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1260 mask &= HALF_INT_MASK;
1262 else
1263 mask = 0;
1265 for (i = 0; i < blocks_needed - 1; i++)
1267 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1268 result[j++] = x;
1269 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1272 HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1273 if (small_prec)
1275 if (sgn == SIGNED)
1276 x = sext_hwi (x, small_prec);
1277 else
1278 x = zext_hwi (x, small_prec);
1280 result[j++] = x;
1281 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1283 /* Smear the sign bit. */
1284 while (j < out_len)
1285 result[j++] = mask;
1288 /* The inverse of wi_unpack. IN_LEN is the number of input
1289 blocks and PRECISION is the precision of the result. Return the
1290 number of blocks in the canonicalized result. */
1291 static unsigned int
1292 wi_pack (HOST_WIDE_INT *result,
1293 const unsigned HOST_HALF_WIDE_INT *input,
1294 unsigned int in_len, unsigned int precision)
1296 unsigned int i = 0;
1297 unsigned int j = 0;
1298 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1300 while (i + 1 < in_len)
1302 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1303 | ((unsigned HOST_WIDE_INT) input[i + 1]
1304 << HOST_BITS_PER_HALF_WIDE_INT));
1305 i += 2;
1308 /* Handle the case where in_len is odd. For this we zero extend. */
1309 if (in_len & 1)
1310 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1311 else if (j < blocks_needed)
1312 result[j++] = 0;
1313 return canonize (result, j, precision);
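/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: unpacking a block into half-wide "digits" and packing the
   digits again reproduces the original value.  Assumes a 64-bit
   HOST_WIDE_INT; the helper name is hypothetical.  */
static void
example_pack_roundtrip_sketch ()
{
  HOST_WIDE_INT in[1] = { 0x12345678 };
  unsigned HOST_HALF_WIDE_INT halves[2];
  HOST_WIDE_INT out[1];
  wi_unpack (halves, in, 1, 2, 64, UNSIGNED);
  unsigned int len = wi_pack (out, halves, 2, 64);
  gcc_checking_assert (len == 1 && out[0] == in[0]);
}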
1316 /* Multiply OP1 by OP2. If HIGH is set, only the upper half of the
1317 result is returned.
1319 If HIGH is not set, throw away the upper half after the check is
1320 made to see if it overflows. Unfortunately there is no better way
1321 to check for overflow than to do this. If OVERFLOW is nonnull,
1322 record in *OVERFLOW whether the result overflowed. SGN controls
1323 the signedness and is used to check overflow or if HIGH is set.
1325 NOTE: Overflow type for signed overflow is not yet implemented. */
1326 unsigned int
1327 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1328 unsigned int op1len, const HOST_WIDE_INT *op2val,
1329 unsigned int op2len, unsigned int prec, signop sgn,
1330 wi::overflow_type *overflow, bool high)
1332 unsigned HOST_WIDE_INT o0, o1, k, t;
1333 unsigned int i;
1334 unsigned int j;
1336 /* If the top level routine did not really pass in an overflow, then
1337 just make sure that we never attempt to set it. */
1338 bool needs_overflow = (overflow != 0);
1339 if (needs_overflow)
1340 *overflow = wi::OVF_NONE;
1342 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1343 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1345 /* This is a surprisingly common case, so do it first. */
1346 if (op1 == 0 || op2 == 0)
1348 val[0] = 0;
1349 return 1;
1352 #ifdef umul_ppmm
1353 if (sgn == UNSIGNED)
1355 /* If the inputs are single HWIs and the output has room for at
1356 least two HWIs, we can use umul_ppmm directly. */
1357 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1358 && wi::fits_uhwi_p (op1)
1359 && wi::fits_uhwi_p (op2))
1361 /* This case never overflows. */
1362 if (high)
1364 val[0] = 0;
1365 return 1;
1367 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1368 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1370 val[2] = 0;
1371 return 3;
1373 return 1 + (val[1] != 0 || val[0] < 0);
1375 /* Likewise if the output is a full single HWI, except that the
1376 upper HWI of the result is only used for determining overflow.
1377 (We handle this case inline when overflow isn't needed.) */
1378 else if (prec == HOST_BITS_PER_WIDE_INT)
1380 unsigned HOST_WIDE_INT upper;
1381 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1382 if (needs_overflow)
1383 /* Unsigned overflow can only be +OVERFLOW. */
1384 *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1385 if (high)
1386 val[0] = upper;
1387 return 1;
1390 #endif
1392 /* Handle multiplications by 1. */
1393 if (op1 == 1)
1395 if (high)
1397 val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1398 return 1;
1400 for (i = 0; i < op2len; i++)
1401 val[i] = op2val[i];
1402 return op2len;
1404 if (op2 == 1)
1406 if (high)
1408 val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1409 return 1;
1411 for (i = 0; i < op1len; i++)
1412 val[i] = op1val[i];
1413 return op1len;
1416 /* If we need to check for overflow, we can only do half wide
1417 multiplies quickly because we need to look at the top bits to
1418 check for the overflow. */
1419 if ((high || needs_overflow)
1420 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1422 unsigned HOST_WIDE_INT r;
1424 if (sgn == SIGNED)
1426 o0 = op1.to_shwi ();
1427 o1 = op2.to_shwi ();
1429 else
1431 o0 = op1.to_uhwi ();
1432 o1 = op2.to_uhwi ();
1435 r = o0 * o1;
1436 if (needs_overflow)
1438 if (sgn == SIGNED)
1440 if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1441 /* FIXME: Signed overflow type is not implemented yet. */
1442 *overflow = OVF_UNKNOWN;
1444 else
1446 if ((r >> prec) != 0)
1447 /* Unsigned overflow can only be +OVERFLOW. */
1448 *overflow = OVF_OVERFLOW;
1451 val[0] = high ? r >> prec : r;
1452 return 1;
1455 /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
1456 WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
1457 result. */
1459 unsigned HOST_HALF_WIDE_INT
1460 ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1461 unsigned HOST_HALF_WIDE_INT
1462 vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1463 /* The '2' in 'R' is because we are internally doing a full
1464 multiply. */
1465 unsigned HOST_HALF_WIDE_INT
1466 rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1467 const HOST_WIDE_INT mask
1468 = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1469 unsigned HOST_HALF_WIDE_INT *u = ubuf;
1470 unsigned HOST_HALF_WIDE_INT *v = vbuf;
1471 unsigned HOST_HALF_WIDE_INT *r = rbuf;
1473 if (!high)
1474 prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
1475 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1476 unsigned int half_blocks_needed = blocks_needed * 2;
1477 if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
1479 unsigned HOST_HALF_WIDE_INT *buf
1480 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * 4 * blocks_needed);
1481 u = buf;
1482 v = u + 4 * blocks_needed;
1483 r = v + 4 * blocks_needed;
1486 /* We do unsigned mul and then correct it. */
1487 wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1488 wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1490 /* The 2 is for a full mult. */
1491 memset (r, 0, half_blocks_needed * 2
1492 * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1494 for (j = 0; j < half_blocks_needed; j++)
1496 k = 0;
1497 for (i = 0; i < half_blocks_needed; i++)
1499 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1500 + r[i + j] + k);
1501 r[i + j] = t & HALF_INT_MASK;
1502 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1504 r[j + half_blocks_needed] = k;
1507 /* We did unsigned math above. For signed we must adjust the
1508 product (assuming we need to see that). */
1509 if (sgn == SIGNED && (high || needs_overflow))
1511 unsigned HOST_WIDE_INT b;
1512 if (wi::neg_p (op1))
1514 b = 0;
1515 for (i = 0; i < half_blocks_needed; i++)
1517 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1518 - (unsigned HOST_WIDE_INT)v[i] - b;
1519 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1520 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1523 if (wi::neg_p (op2))
1525 b = 0;
1526 for (i = 0; i < half_blocks_needed; i++)
1528 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1529 - (unsigned HOST_WIDE_INT)u[i] - b;
1530 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1531 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1536 if (needs_overflow)
1538 HOST_WIDE_INT top;
1540 /* For unsigned, overflow is true if any of the top bits are set.
1541 For signed, overflow is true if any of the top bits are not equal
1542 to the sign bit. */
1543 if (sgn == UNSIGNED)
1544 top = 0;
1545 else
1547 top = r[(half_blocks_needed) - 1];
1548 top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1549 top &= mask;
1552 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1553 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1554 /* FIXME: Signed overflow type is not implemented yet. */
1555 *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1558 int r_offset = high ? half_blocks_needed : 0;
1559 return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1562 /* Compute the population count of X. */
1564 wi::popcount (const wide_int_ref &x)
1566 unsigned int i;
1567 int count;
1569 /* The high order block is special if it is the last block and the
1570 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1571 have to clear out any ones above the precision before doing
1572 popcount on this block. */
1573 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1574 unsigned int stop = x.len;
1575 if (count < 0)
1577 count = popcount_hwi (x.uhigh () << -count);
1578 stop -= 1;
1580 else
1582 if (x.sign_mask () >= 0)
1583 count = 0;
1586 for (i = 0; i < stop; ++i)
1587 count += popcount_hwi (x.val[i]);
1589 return count;
1592 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1593 whether the result overflows when OP0 and OP1 are treated as having
1594 signedness SGN. Return the number of blocks in VAL. */
1595 unsigned int
1596 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1597 unsigned int op0len, const HOST_WIDE_INT *op1,
1598 unsigned int op1len, unsigned int prec,
1599 signop sgn, wi::overflow_type *overflow)
1601 unsigned HOST_WIDE_INT o0 = 0;
1602 unsigned HOST_WIDE_INT o1 = 0;
1603 unsigned HOST_WIDE_INT x = 0;
1604 /* We implement subtraction as an in place negate and add. Negation
1605 is just inversion and add 1, so we can do the add of 1 by just
1606 starting the borrow in of the first element at 1. */
1607 unsigned HOST_WIDE_INT borrow = 0;
1608 unsigned HOST_WIDE_INT old_borrow = 0;
1610 unsigned HOST_WIDE_INT mask0, mask1;
1611 unsigned int i;
1613 unsigned int len = MAX (op0len, op1len);
1614 mask0 = -top_bit_of (op0, op0len, prec);
1615 mask1 = -top_bit_of (op1, op1len, prec);
1617 /* Subtract all of the explicitly defined elements. */
1618 for (i = 0; i < len; i++)
1620 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1621 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1622 x = o0 - o1 - borrow;
1623 val[i] = x;
1624 old_borrow = borrow;
1625 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1628 if (len * HOST_BITS_PER_WIDE_INT < prec)
1630 val[len] = mask0 - mask1 - borrow;
1631 len++;
1632 if (overflow)
1633 *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1635 else if (overflow)
1637 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1638 if (sgn == SIGNED)
1640 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1641 if ((HOST_WIDE_INT) (x << shift) < 0)
1643 if (o0 > o1)
1644 *overflow = OVF_UNDERFLOW;
1645 else if (o0 < o1)
1646 *overflow = OVF_OVERFLOW;
1647 else
1648 *overflow = OVF_NONE;
1650 else
1651 *overflow = OVF_NONE;
1653 else
1655 /* Put the MSBs of X and O0 in the top of the HWI. */
1656 x <<= shift;
1657 o0 <<= shift;
1658 if (old_borrow)
1659 *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1660 else
1661 *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1665 return canonize (val, len, prec);
1670 * Division and Mod
1673 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1674 algorithm is a small modification of the algorithm in Hacker's
1675 Delight by Warren, which itself is a small modification of Knuth's
1676 algorithm. M is the number of significant elements of B_DIVIDEND (U in
1677 Knuth's notation); however, there needs to be at least one extra element
1678 of B_DIVIDEND allocated. N is the number of elements of B_DIVISOR. */
1679 static void
1680 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1681 unsigned HOST_HALF_WIDE_INT *b_remainder,
1682 unsigned HOST_HALF_WIDE_INT *b_dividend,
1683 unsigned HOST_HALF_WIDE_INT *b_divisor,
1684 int m, int n)
1686 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1687 HOST_WIDE_INT and stored in the lower bits of each word. This
1688 algorithm should work properly on both 32 and 64 bit
1689 machines. */
1690 unsigned HOST_WIDE_INT b
1691 = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1692 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1693 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1694 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1695 HOST_WIDE_INT t, k;
1696 int i, j, s;
1698 /* Single digit divisor. */
1699 if (n == 1)
1701 k = 0;
1702 for (j = m - 1; j >= 0; j--)
1704 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1705 k = ((k * b + b_dividend[j])
1706 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1707 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1709 b_remainder[0] = k;
1710 return;
1713 s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1715 if (s)
1717 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1718 algorithm, we can overwrite b_dividend and b_divisor, so we do
1719 that. */
1720 for (i = n - 1; i > 0; i--)
1721 b_divisor[i] = (b_divisor[i] << s)
1722 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1723 b_divisor[0] = b_divisor[0] << s;
1725 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1726 for (i = m - 1; i > 0; i--)
1727 b_dividend[i] = (b_dividend[i] << s)
1728 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1729 b_dividend[0] = b_dividend[0] << s;
1732 /* Main loop. */
1733 for (j = m - n; j >= 0; j--)
1735 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1736 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1737 again:
1738 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1740 qhat -= 1;
1741 rhat += b_divisor[n-1];
1742 if (rhat < b)
1743 goto again;
1746 /* Multiply and subtract. */
1747 k = 0;
1748 for (i = 0; i < n; i++)
1750 p = qhat * b_divisor[i];
1751 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1752 b_dividend[i + j] = t;
1753 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1754 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1756 t = b_dividend[j+n] - k;
1757 b_dividend[j+n] = t;
1759 b_quotient[j] = qhat;
1760 if (t < 0)
1762 b_quotient[j] -= 1;
1763 k = 0;
1764 for (i = 0; i < n; i++)
1766 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1767 b_dividend[i+j] = t;
1768 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1770 b_dividend[j+n] += k;
1773 if (s)
1774 for (i = 0; i < n; i++)
1775 b_remainder[i] = (b_dividend[i] >> s)
1776 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1777 else
1778 for (i = 0; i < n; i++)
1779 b_remainder[i] = b_dividend[i];
1783 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1784 the result. If QUOTIENT is nonnull, store the value of the quotient
1785 there and return the number of blocks in it. The return value is
1786 not defined otherwise. If REMAINDER is nonnull, store the value
1787 of the remainder there and store the number of blocks in
1788 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1789 the division overflowed. */
1790 unsigned int
1791 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1792 HOST_WIDE_INT *remainder,
1793 const HOST_WIDE_INT *dividend_val,
1794 unsigned int dividend_len, unsigned int dividend_prec,
1795 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1796 unsigned int divisor_prec, signop sgn,
1797 wi::overflow_type *oflow)
1799 unsigned int m, n;
1800 bool dividend_neg = false;
1801 bool divisor_neg = false;
1802 bool overflow = false;
1803 wide_int neg_dividend, neg_divisor;
1805 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1806 dividend_prec);
1807 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1808 divisor_prec);
1809 if (divisor == 0)
1810 overflow = true;
1812 /* The smallest signed number / -1 causes overflow. The dividend_len
1813 check is for speed rather than correctness. */
1814 if (sgn == SIGNED
1815 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1816 && divisor == -1
1817 && wi::only_sign_bit_p (dividend))
1818 overflow = true;
1820 /* Handle the overflow cases. Viewed as an unsigned value, the quotient of
1821 (signed min / -1) has the same representation as the original dividend.
1822 We have traditionally made division by zero act as division by one,
1823 so there too we use the original dividend. */
1824 if (overflow)
1826 if (remainder)
1828 *remainder_len = 1;
1829 remainder[0] = 0;
1831 if (oflow)
1832 *oflow = OVF_OVERFLOW;
1833 if (quotient)
1834 for (unsigned int i = 0; i < dividend_len; ++i)
1835 quotient[i] = dividend_val[i];
1836 return dividend_len;
1839 if (oflow)
1840 *oflow = OVF_NONE;
1842 /* Do it on the host if you can. */
1843 if (sgn == SIGNED
1844 && wi::fits_shwi_p (dividend)
1845 && wi::fits_shwi_p (divisor))
1847 HOST_WIDE_INT o0 = dividend.to_shwi ();
1848 HOST_WIDE_INT o1 = divisor.to_shwi ();
1850 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1852 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1853 if (quotient)
1855 quotient[0] = HOST_WIDE_INT_MIN;
1856 quotient[1] = 0;
1858 if (remainder)
1860 remainder[0] = 0;
1861 *remainder_len = 1;
1863 return 2;
1865 else
1867 if (quotient)
1868 quotient[0] = o0 / o1;
1869 if (remainder)
1871 remainder[0] = o0 % o1;
1872 *remainder_len = 1;
1874 return 1;
1878 if (sgn == UNSIGNED
1879 && wi::fits_uhwi_p (dividend)
1880 && wi::fits_uhwi_p (divisor))
1882 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1883 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1884 unsigned int quotient_len = 1;
1886 if (quotient)
1888 quotient[0] = o0 / o1;
1889 quotient_len = canonize_uhwi (quotient, dividend_prec);
1891 if (remainder)
1893 remainder[0] = o0 % o1;
1894 *remainder_len = canonize_uhwi (remainder, dividend_prec);
1896 return quotient_len;
1899 /* Make the divisor and dividend positive and remember what we
1900 did. */
1901 if (sgn == SIGNED)
1903 if (wi::neg_p (dividend))
1905 neg_dividend = -dividend;
1906 dividend = neg_dividend;
1907 dividend_neg = true;
1909 if (wi::neg_p (divisor))
1911 neg_divisor = -divisor;
1912 divisor = neg_divisor;
1913 divisor_neg = true;
1917 unsigned HOST_HALF_WIDE_INT
1918 b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
1919 / HOST_BITS_PER_HALF_WIDE_INT];
1920 unsigned HOST_HALF_WIDE_INT
1921 b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
1922 / HOST_BITS_PER_HALF_WIDE_INT];
1923 unsigned HOST_HALF_WIDE_INT
1924 b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
1925 / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1926 unsigned HOST_HALF_WIDE_INT
1927 b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
1928 / HOST_BITS_PER_HALF_WIDE_INT];
1929 unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
1930 unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
1931 unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
1932 unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;
1934 if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
1935 dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
1936 dividend_prec);
1937 if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
1938 divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
1939 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1940 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1941 if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
1942 || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
1944 unsigned HOST_HALF_WIDE_INT *buf
1945 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
1946 12 * dividend_blocks_needed
1947 + 4 * divisor_blocks_needed + 1);
1948 b_quotient = buf;
1949 b_remainder = b_quotient + 4 * dividend_blocks_needed;
1950 b_dividend = b_remainder + 4 * dividend_blocks_needed;
1951 b_divisor = b_dividend + 4 * dividend_blocks_needed + 1;
1952 memset (b_quotient, 0,
1953 4 * dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
1955 wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1956 dividend_blocks_needed, dividend_prec, UNSIGNED);
1957 wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1958 divisor_blocks_needed, divisor_prec, UNSIGNED);
1960 m = dividend_blocks_needed;
1961 b_dividend[m] = 0;
1962 while (m > 1 && b_dividend[m - 1] == 0)
1963 m--;
1965 n = divisor_blocks_needed;
1966 while (n > 1 && b_divisor[n - 1] == 0)
1967 n--;
1969 if (b_quotient == b_quotient_buf)
1970 memset (b_quotient_buf, 0, sizeof (b_quotient_buf));
1972 divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1974 unsigned int quotient_len = 0;
1975 if (quotient)
1977 quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1978 /* The quotient is neg if exactly one of the divisor or dividend is
1979 neg. */
1980 if (dividend_neg != divisor_neg)
1981 quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1982 quotient_len, dividend_prec,
1983 UNSIGNED, 0);
1986 if (remainder)
1988 *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1989 /* The remainder is always the same sign as the dividend. */
1990 if (dividend_neg)
1991 *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1992 *remainder_len, dividend_prec,
1993 UNSIGNED, 0);
1996 return quotient_len;
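/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: 7 / -2 in 32-bit signed arithmetic truncates towards zero,
   giving quotient -3 and remainder 1.  The helper name is
   hypothetical.  */
static void
example_divmod_sketch ()
{
  HOST_WIDE_INT dividend[1] = { 7 };
  HOST_WIDE_INT divisor[1] = { -2 };
  HOST_WIDE_INT quot[2], rem[2];
  unsigned int rem_len;
  unsigned int quot_len
    = wi::divmod_internal (quot, &rem_len, rem, dividend, 1, 32,
			   divisor, 1, 32, SIGNED, NULL);
  gcc_checking_assert (quot_len == 1 && quot[0] == -3);
  gcc_checking_assert (rem_len == 1 && rem[0] == 1);
}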
2000 * Shifting, rotating and extraction.
2003 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
2004 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
2005 unsigned int
2006 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2007 unsigned int xlen, unsigned int precision,
2008 unsigned int shift)
2010 /* Split the shift into a whole-block shift and a subblock shift. */
2011 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
2012 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
2014 /* The whole-block shift fills with zeros. */
2015 unsigned int len = BLOCKS_NEEDED (precision);
2016 len = MIN (xlen + skip + 1, len);
2017 for (unsigned int i = 0; i < skip; ++i)
2018 val[i] = 0;
2020 /* It's easier to handle the simple block case specially. */
2021 if (small_shift == 0)
2022 for (unsigned int i = skip; i < len; ++i)
2023 val[i] = safe_uhwi (xval, xlen, i - skip);
2024 else
2026 /* The first unfilled output block is a left shift of the first
2027 block in XVAL. The other output blocks contain bits from two
2028 consecutive input blocks. */
2029 unsigned HOST_WIDE_INT carry = 0;
2030 for (unsigned int i = skip; i < len; ++i)
2032 unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
2033 val[i] = (x << small_shift) | carry;
2034 carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
2037 return canonize (val, len, precision);
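/* Illustrative sketch, added for exposition and not part of the original
   wide-int.cc: shifting 1 left by 100 within 128 bits of precision sets
   bit 36 of the second block.  Assumes a 64-bit HOST_WIDE_INT; the helper
   name is hypothetical.  */
static void
example_lshift_sketch ()
{
  HOST_WIDE_INT xval[1] = { 1 };
  HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
  unsigned int len = wi::lshift_large (val, xval, 1, 128, 100);
  gcc_checking_assert (len == 2
		       && val[0] == 0
		       && val[1] == (HOST_WIDE_INT_1 << 36));
}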
2040 /* Right shift XVAL by SHIFT and store the result in VAL. LEN is the
2041 number of blocks in VAL. The input has XPRECISION bits and the
2042 output has XPRECISION - SHIFT bits. */
2043 static void
2044 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2045 unsigned int xlen, unsigned int shift, unsigned int len)
2047 /* Split the shift into a whole-block shift and a subblock shift. */
2048 unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
2049 unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
2051 /* It's easier to handle the simple block case specially. */
2052 if (small_shift == 0)
2053 for (unsigned int i = 0; i < len; ++i)
2054 val[i] = safe_uhwi (xval, xlen, i + skip);
2055 else
2057 /* Each output block but the last is a combination of two input blocks.
2058 The last block is a right shift of the last block in XVAL. */
2059 unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
2060 for (unsigned int i = 0; i < len; ++i)
2062 val[i] = curr >> small_shift;
2063 curr = safe_uhwi (xval, xlen, i + skip + 1);
2064 val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
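/* As a sketch of the stitching above (with a 64-bit HOST_WIDE_INT), a
   shift of 68 uses SKIP == 1 and SMALL_SHIFT == 4, so each output
   block is effectively

     val[i] = (xval[i + 1] >> 4) | (xval[i + 2] << 60);

   with safe_uhwi supplying zero or sign blocks beyond XLEN.  */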
2069 /* Logically right shift XVAL by SHIFT and store the result in VAL.
2070 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2071 VAL has PRECISION bits. */
2072 unsigned int
2073 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2074 unsigned int xlen, unsigned int xprecision,
2075 unsigned int precision, unsigned int shift)
2077 /* Work out how many blocks are needed to store the significant bits
2078 (excluding the upper zeros or signs). */
2079 unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
2080 unsigned int len = blocks_needed;
2081 if (len > xlen && xval[xlen - 1] >= 0)
2082 len = xlen;
2084 rshift_large_common (val, xval, xlen, shift, len);
2086 /* The value we just created has precision XPRECISION - SHIFT.
2087 Zero-extend it to wider precisions. */
2088 if (precision > xprecision - shift && len == blocks_needed)
2090 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2091 if (small_prec)
2092 val[len - 1] = zext_hwi (val[len - 1], small_prec);
2093 else if (val[len - 1] < 0)
2095 /* Add a new block with a zero. */
2096 val[len++] = 0;
2097 return len;
2100 return canonize (val, len, precision);
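/* For illustration (values chosen only for the example), a logical
   shift fills the vacated upper bits with zeros, so assuming the
   wi::lrshift entry point forwards here,

     wi::lrshift (wi::shwi (-2, 32), 1)

   is 0x7fffffff at 32-bit precision.  */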
2103 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2104 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2105 VAL has PRECISION bits. */
2106 unsigned int
2107 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2108 unsigned int xlen, unsigned int xprecision,
2109 unsigned int precision, unsigned int shift)
2111 /* Work out how many blocks are needed to store the significant bits
2112 (excluding the upper zeros or signs). */
2113 unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
2114 unsigned int len = MIN (xlen, blocks_needed);
2116 rshift_large_common (val, xval, xlen, shift, len);
2118 /* The value we just created has precision XPRECISION - SHIFT.
2119 Sign-extend it to wider precisions. */
2120 if (precision > xprecision - shift && len == blocks_needed)
2122 unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2123 if (small_prec)
2124 val[len - 1] = sext_hwi (val[len - 1], small_prec);
2126 return canonize (val, len, precision);
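/* The arithmetic variant replicates the sign bit instead; assuming
   the wi::arshift entry point forwards here,

     wi::arshift (wi::shwi (-2, 32), 1)

   is -1 at 32-bit precision, while nonnegative inputs behave exactly
   as in the logical case.  */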
2129 /* Return the number of leading (upper) zeros in X. */
2131 wi::clz (const wide_int_ref &x)
2133 if (x.sign_mask () < 0)
2134 /* The upper bit is set, so there are no leading zeros. */
2135 return 0;
2137 /* Calculate how many bits there are above the highest represented block. */
2138 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2140 unsigned HOST_WIDE_INT high = x.uhigh ();
2141 if (count < 0)
2142 /* The upper -COUNT bits of HIGH are not part of the value.
2143 Clear them out. */
2144 high = (high << -count) >> -count;
2146 /* We don't need to look below HIGH. Either HIGH is nonzero,
2147 or the top bit of the block below is nonzero; clz_hwi is
2148 HOST_BITS_PER_WIDE_INT in the latter case. */
2149 return count + clz_hwi (high);
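/* A rough worked example (with 64-bit HOST_WIDE_INT): for a 16-bit X
   equal to 1, LEN is 1, so COUNT is 16 - 64 == -48, HIGH is still 1
   after the masking step and clz_hwi gives 63, for a total of 15
   leading zeros.  A zero of precision P likewise comes out as P.  */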
2152 /* Return the number of redundant sign bits in X. (That is, the number
2153 of bits immediately below the sign bit that have the same value as
2154 the sign bit.) */
2156 wi::clrsb (const wide_int_ref &x)
2158 /* Calculate how many bits there are above the highest represented block. */
2159 int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2161 unsigned HOST_WIDE_INT high = x.uhigh ();
2162 unsigned HOST_WIDE_INT mask = -1;
2163 if (count < 0)
2165 /* The upper -COUNT bits of HIGH are not part of the value.
2166 Clear them from both MASK and HIGH. */
2167 mask >>= -count;
2168 high &= mask;
2171 /* If the top bit is 1, count the number of leading 1s. If the top
2172 bit is zero, count the number of leading zeros. */
2173 if (high > mask / 2)
2174 high ^= mask;
2176 /* There are no sign bits below the top block, so we don't need to look
2177 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2178 HIGH is 0. */
2179 return count + clz_hwi (high) - 1;
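/* A rough worked example (with 64-bit HOST_WIDE_INT): for a 32-bit X
   equal to 1, COUNT is -32, MASK is reduced to 0xffffffff and HIGH
   stays 1, which is not above MASK / 2 and so is not flipped; the
   result is -32 + clz_hwi (1) - 1 == 30, i.e. thirty copies of the
   zero sign bit sit directly below bit 31.  */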
2182 /* Return the number of trailing (lower) zeros in X. */
2184 wi::ctz (const wide_int_ref &x)
2186 if (x.len == 1 && x.ulow () == 0)
2187 return x.precision;
2189 /* Having dealt with the zero case, there must be a block with a
2190 nonzero bit. We don't care about the bits above the first 1. */
2191 unsigned int i = 0;
2192 while (x.val[i] == 0)
2193 ++i;
2194 return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
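/* For example (with 64-bit HOST_WIDE_INT), if the only set bit of X
   is bit 67, the low block is zero, I stops at 1 and the result is
   64 + ctz_hwi (8) == 67; a zero value simply reports its full
   precision.  */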
2197 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2198 return -1. */
2200 wi::exact_log2 (const wide_int_ref &x)
2202 /* Reject cases where there are implicit -1 blocks above HIGH. */
2203 if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2204 return -1;
2206 /* Set CRUX to the index of the entry that should be nonzero.
2207 If the top block is zero then the next lowest block (if any)
2208 must have the high bit set. */
2209 unsigned int crux = x.len - 1;
2210 if (crux > 0 && x.val[crux] == 0)
2211 crux -= 1;
2213 /* Check that all lower blocks are zero. */
2214 for (unsigned int i = 0; i < crux; ++i)
2215 if (x.val[i] != 0)
2216 return -1;
2218 /* Get a zero-extended form of block CRUX. */
2219 unsigned HOST_WIDE_INT hwi = x.val[crux];
2220 if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2221 hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2223 /* Now it's down to whether HWI is a power of 2. */
2224 int res = ::exact_log2 (hwi);
2225 if (res >= 0)
2226 res += crux * HOST_BITS_PER_WIDE_INT;
2227 return res;
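/* A rough worked example (with 64-bit HOST_WIDE_INT): for 2^67 at
   128-bit precision, CRUX is 1, the lower block is zero and HWI is 8,
   so the result is 3 + 1 * 64 == 67; for a value such as 12 the final
   power-of-2 test fails and -1 is returned.  */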
2230 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2232 wi::floor_log2 (const wide_int_ref &x)
2234 return x.precision - 1 - clz (x);
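/* For instance, a 32-bit value of 12 has 28 leading zeros, so this
   gives 32 - 1 - 28 == 3, matching floor (log2 (12)).  */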
2237 /* Return the index of the first (lowest) set bit in X, counting from 1.
2238 Return 0 if X is 0. */
2240 wi::ffs (const wide_int_ref &x)
2242 return eq_p (x, 0) ? 0 : ctz (x) + 1;
2245 /* Return true if sign-extending X to have precision PRECISION would give
2246 the minimum signed value at that precision. */
2247 bool
2248 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2250 return ctz (x) + 1 == int (precision);
2253 /* Return true if X represents the minimum signed value. */
2254 bool
2255 wi::only_sign_bit_p (const wide_int_ref &x)
2257 return only_sign_bit_p (x, x.precision);
2260 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2261 down to the previous value that has no bits set outside MASK.
2262 This rounding wraps for signed values if VAL is negative and
2263 the top bit of MASK is clear.
2265 For example, round_down_for_mask (6, 0xf1) would give 1 and
2266 round_down_for_mask (24, 0xf1) would give 17. */
2268 wide_int
2269 wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2271 /* Get the bits in VAL that are outside the mask. */
2272 wide_int extra_bits = wi::bit_and_not (val, mask);
2273 if (extra_bits == 0)
2274 return val;
2276 /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2277 below that bit. */
2278 unsigned int precision = val.get_precision ();
2279 wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2280 false, precision);
2282 /* Clear the bits that aren't in MASK, but ensure that all bits
2283 in MASK below the top cleared bit are set. */
2284 return (val & mask) | (mask & lower_mask);
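/* Tracing the second example from the comment above: for VAL == 24
   (0b11000) and MASK == 0xf1, EXTRA_BITS is 0b1000, LOWER_MASK covers
   bits 0-3, and (VAL & MASK) | (MASK & LOWER_MASK) is
   0b10000 | 0b00001 == 17.  */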
2287 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2288 up to the next value that has no bits set outside MASK. The rounding
2289 wraps if there are no suitable values greater than VAL.
2291 For example, round_up_for_mask (6, 0xf1) would give 16 and
2292 round_up_for_mask (24, 0xf1) would give 32. */
2294 wide_int
2295 wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2297 /* Get the bits in VAL that are outside the mask. */
2298 wide_int extra_bits = wi::bit_and_not (val, mask);
2299 if (extra_bits == 0)
2300 return val;
2302 /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
2303 unsigned int precision = val.get_precision ();
2304 wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2305 true, precision);
2307 /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
2308 upper_mask &= mask;
2310 /* Conceptually we need to:
2312 - clear bits of VAL outside UPPER_MASK
2313 - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2314 - propagate the carry through the bits of VAL in UPPER_MASK
2316 If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2317 reaches that bit and the process leaves all lower bits clear.
2318 If (~VAL & UPPER_MASK) is zero then the result is also zero. */
2319 wide_int tmp = wi::bit_and_not (upper_mask, val);
2321 return (val | tmp) & -tmp;
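/* Tracing the second example from the comment above: for VAL == 24
   (0b11000) and MASK == 0xf1, EXTRA_BITS is 0b1000, UPPER_MASK becomes
   0b11110000, TMP == UPPER_MASK & ~VAL is 0b11100000, and
   (VAL | TMP) & -TMP keeps only bit 5, giving 32.  */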
2324 /* Compute the modular multiplicative inverse of A modulo B
2325 using the extended Euclidean algorithm. Assumes A and B are coprime,
2326 and that A and B have the same precision. */
2327 wide_int
2328 wi::mod_inv (const wide_int &a, const wide_int &b)
2330 /* Verify the assumption. */
2331 gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
2333 unsigned int p = a.get_precision () + 1;
2334 gcc_checking_assert (b.get_precision () + 1 == p);
2335 wide_int c = wide_int::from (a, p, UNSIGNED);
2336 wide_int d = wide_int::from (b, p, UNSIGNED);
2337 wide_int x0 = wide_int::from (0, p, UNSIGNED);
2338 wide_int x1 = wide_int::from (1, p, UNSIGNED);
2340 if (wi::eq_p (b, 1))
2341 return wide_int::from (1, p, UNSIGNED);
2343 while (wi::gt_p (c, 1, UNSIGNED))
2345 wide_int t = d;
2346 wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
2347 c = t;
2348 wide_int s = x0;
2349 x0 = wi::sub (x1, wi::mul (q, x0));
2350 x1 = s;
2352 if (wi::lt_p (x1, 0, SIGNED))
2353 x1 += d;
2354 return x1;
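/* A small usage sketch (values chosen only for illustration): with
   coprime operands of equal precision,

     wi::mod_inv (wi::shwi (3, 32), wi::shwi (11, 32))

   returns 4, since 3 * 4 == 12 == 1 (mod 11).  */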
2358 * Private utilities.
2361 void gt_ggc_mx (widest_int *) { }
2362 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2363 void gt_pch_nx (widest_int *) { }
2365 template void wide_int::dump () const;
2366 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2367 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2368 template void offset_int::dump () const;
2369 template void widest_int::dump () const;
2371 /* We could add all the above ::dump variants here, but wide_int and
2372 widest_int should handle the common cases. Besides, you can always
2373 call the dump method directly. */
2375 DEBUG_FUNCTION void
2376 debug (const wide_int &ref)
2378 ref.dump ();
2381 DEBUG_FUNCTION void
2382 debug (const wide_int *ptr)
2384 if (ptr)
2385 debug (*ptr);
2386 else
2387 fprintf (stderr, "<nil>\n");
2390 DEBUG_FUNCTION void
2391 debug (const widest_int &ref)
2393 ref.dump ();
2396 DEBUG_FUNCTION void
2397 debug (const widest_int *ptr)
2399 if (ptr)
2400 debug (*ptr);
2401 else
2402 fprintf (stderr, "<nil>\n");
2405 #if CHECKING_P
2407 namespace selftest {
2409 /* Selftests for wide ints. We run these multiple times, once per type. */
2411 /* Helper function for building a test value. */
2413 template <class VALUE_TYPE>
2414 static VALUE_TYPE
2415 from_int (int i);
2417 /* Specializations of from_int for each wide-int type. */
2419 /* Specialization for VALUE_TYPE == wide_int. */
2421 template <>
2422 wide_int
2423 from_int (int i)
2425 return wi::shwi (i, 32);
2428 /* Specialization for VALUE_TYPE == offset_int. */
2430 template <>
2431 offset_int
2432 from_int (int i)
2434 return offset_int (i);
2437 /* Specialization for VALUE_TYPE == widest_int. */
2439 template <>
2440 widest_int
2441 from_int (int i)
2443 return widest_int (i);
2446 /* Verify that print_dec (WI, ..., SGN) gives the expected string
2447 representation (using base 10). */
2449 static void
2450 assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2452 char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
2453 unsigned len;
2454 if (print_dec_buf_size (wi, sgn, &len))
2455 p = XALLOCAVEC (char, len);
2456 print_dec (wi, p, sgn);
2457 ASSERT_STREQ (expected, p);
2460 /* Likewise for base 16. */
2462 static void
2463 assert_hexeq (const char *expected, const wide_int_ref &wi)
2465 char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
2466 unsigned len;
2467 if (print_hex_buf_size (wi, &len))
2468 p = XALLOCAVEC (char, len);
2469 print_hex (wi, p);
2470 ASSERT_STREQ (expected, p);
2473 /* Test cases. */
2475 /* Verify that print_dec and print_hex work for VALUE_TYPE. */
2477 template <class VALUE_TYPE>
2478 static void
2479 test_printing ()
2481 VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2482 assert_deceq ("42", a, SIGNED);
2483 assert_hexeq ("0x2a", a);
2484 assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2485 assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2486 assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2487 if (WIDE_INT_MAX_INL_PRECISION > 128)
2489 assert_hexeq ("0x20000000000000000fffffffffffffffe",
2490 wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2491 assert_hexeq ("0x200000000000004000123456789abcdef",
2492 wi::lshift (1, 129) + wi::lshift (1, 74)
2493 + wi::lshift (0x1234567, 32) + 0x89abcdef);
2497 /* Verify that various operations work correctly for VALUE_TYPE,
2498 unary and binary, using both function syntax and overloaded
2499 operators. */
2501 template <class VALUE_TYPE>
2502 static void
2503 test_ops ()
2505 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2506 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2508 /* Using functions. */
2509 assert_deceq ("-7", wi::neg (a), SIGNED);
2510 assert_deceq ("10", wi::add (a, b), SIGNED);
2511 assert_deceq ("4", wi::sub (a, b), SIGNED);
2512 assert_deceq ("-4", wi::sub (b, a), SIGNED);
2513 assert_deceq ("21", wi::mul (a, b), SIGNED);
2515 /* Using operators. */
2516 assert_deceq ("-7", -a, SIGNED);
2517 assert_deceq ("10", a + b, SIGNED);
2518 assert_deceq ("4", a - b, SIGNED);
2519 assert_deceq ("-4", b - a, SIGNED);
2520 assert_deceq ("21", a * b, SIGNED);
2523 /* Verify that various comparisons work correctly for VALUE_TYPE. */
2525 template <class VALUE_TYPE>
2526 static void
2527 test_comparisons ()
2529 VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2530 VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2532 /* == */
2533 ASSERT_TRUE (wi::eq_p (a, a));
2534 ASSERT_FALSE (wi::eq_p (a, b));
2536 /* != */
2537 ASSERT_TRUE (wi::ne_p (a, b));
2538 ASSERT_FALSE (wi::ne_p (a, a));
2540 /* < */
2541 ASSERT_FALSE (wi::lts_p (a, a));
2542 ASSERT_FALSE (wi::lts_p (a, b));
2543 ASSERT_TRUE (wi::lts_p (b, a));
2545 /* <= */
2546 ASSERT_TRUE (wi::les_p (a, a));
2547 ASSERT_FALSE (wi::les_p (a, b));
2548 ASSERT_TRUE (wi::les_p (b, a));
2550 /* > */
2551 ASSERT_FALSE (wi::gts_p (a, a));
2552 ASSERT_TRUE (wi::gts_p (a, b));
2553 ASSERT_FALSE (wi::gts_p (b, a));
2555 /* >= */
2556 ASSERT_TRUE (wi::ges_p (a, a));
2557 ASSERT_TRUE (wi::ges_p (a, b));
2558 ASSERT_FALSE (wi::ges_p (b, a));
2560 /* comparison */
2561 ASSERT_EQ (-1, wi::cmps (b, a));
2562 ASSERT_EQ (0, wi::cmps (a, a));
2563 ASSERT_EQ (1, wi::cmps (a, b));
2566 /* Run all of the selftests, using the given VALUE_TYPE. */
2568 template <class VALUE_TYPE>
2569 static void run_all_wide_int_tests ()
2571 test_printing <VALUE_TYPE> ();
2572 test_ops <VALUE_TYPE> ();
2573 test_comparisons <VALUE_TYPE> ();
2576 /* Test overflow conditions. */
2578 static void
2579 test_overflow ()
2581 static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2582 static int offsets[] = { 16, 1, 0 };
2583 for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2584 for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2586 int prec = precs[i];
2587 int offset = offsets[j];
2588 wi::overflow_type overflow;
2589 wide_int sum, diff;
2591 sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2592 UNSIGNED, &overflow);
2593 ASSERT_EQ (sum, -offset);
2594 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2596 sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2597 UNSIGNED, &overflow);
2598 ASSERT_EQ (sum, -offset);
2599 ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2601 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2602 wi::max_value (prec, UNSIGNED),
2603 UNSIGNED, &overflow);
2604 ASSERT_EQ (diff, -offset);
2605 ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2607 diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2608 wi::max_value (prec, UNSIGNED) - 1,
2609 UNSIGNED, &overflow);
2610 ASSERT_EQ (diff, 1 - offset);
2611 ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2615 /* Test the round_{down,up}_for_mask functions. */
2617 static void
2618 test_round_for_mask ()
2620 unsigned int prec = 18;
2621 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2622 wi::shwi (0xf1, prec)));
2623 ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2624 wi::shwi (0xf1, prec)));
2626 ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2627 wi::shwi (0xf1, prec)));
2628 ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2629 wi::shwi (0xf1, prec)));
2631 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2632 wi::shwi (0xf1, prec)));
2633 ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2634 wi::shwi (0xf1, prec)));
2636 ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2637 wi::shwi (0x111, prec)));
2638 ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2639 wi::shwi (0x111, prec)));
2641 ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2642 wi::shwi (0xfc, prec)));
2643 ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2644 wi::shwi (0xfc, prec)));
2646 ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2647 wi::shwi (0xabc, prec)));
2648 ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2649 wi::shwi (0xabc, prec)));
2651 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2652 wi::shwi (0xabc, prec)));
2653 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2654 wi::shwi (0xabc, prec)));
2656 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2657 wi::shwi (0xabc, prec)));
2658 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2659 wi::shwi (0xabc, prec)));
2662 /* Run all of the selftests within this file, for all value types. */
2664 void
2665 wide_int_cc_tests ()
2667 run_all_wide_int_tests <wide_int> ();
2668 run_all_wide_int_tests <offset_int> ();
2669 run_all_wide_int_tests <widest_int> ();
2670 test_overflow ();
2671 test_round_for_mask ();
2672 ASSERT_EQ (wi::mask (128, false, 128),
2673 wi::shifted_mask (0, 128, false, 128));
2674 ASSERT_EQ (wi::mask (128, true, 128),
2675 wi::shifted_mask (0, 128, true, 128));
2678 } // namespace selftest
2679 #endif /* CHECKING_P */