/* Operations with very long integers.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
#include "tree.h"
#include "dumpfile.h"

#if GCC_VERSION >= 3000
#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#include "longlong.h"
#endif
/* This is the maximal size of the buffer needed for dump.  */
const unsigned int MAX_SIZE = (4 * (MAX_BITSIZE_MODE_ANY_INT / 4
				    + (MAX_BITSIZE_MODE_ANY_INT
				       / HOST_BITS_PER_WIDE_INT)
				    + 32));

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
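
/* Illustrative sketch, not part of the original file: with a 64-bit
   HOST_WIDE_INT, BLOCKS_NEEDED (128) == 2, BLOCKS_NEEDED (65) == 2,
   BLOCKS_NEEDED (64) == 1, and the degenerate BLOCKS_NEEDED (0) is
   defined to be 1.  SIGN_MASK smears a block's top bit across a whole
   block:

     SIGN_MASK (0x8000000000000000)  ->  (HOST_WIDE_INT) -1
     SIGN_MASK (0x0000000000000001)  ->  0

   The numeric values assume HOST_BITS_PER_WIDE_INT == 64.  */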
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT) -1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
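
/* Illustrative sketch, not part of the original file: with a 64-bit
   HOST_WIDE_INT, a 128-bit value whose upper block merely sign-extends
   the lower block is stored with len == 1:

     HOST_WIDE_INT v[2] = { 5, 0 };
     unsigned int len = canonize (v, 2, 128);   // len == 1

     HOST_WIDE_INT w[2] = { -1, 0 };            // 2^64 - 1
     len = canonize (w, 2, 128);                // len == 2; the zero block
						// keeps the value from being
						// read back as -1.

   Assumes HOST_BITS_PER_WIDE_INT == 64.  */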
/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower LEN bytes of the result are set; the remaining high
   bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (wide_int x, mpz_t result, signop sgn)
{
  bool negative = false;
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int small_prec = x.get_precision () & (HOST_BITS_PER_WIDE_INT - 1);

  if (wi::neg_p (x, sgn))
    {
      negative = true;
      /* We use ones' complement to avoid the 0x80..0 edge case that
	 negation won't work on.  */
      x = ~x;
    }

  if (sgn == UNSIGNED && small_prec)
    {
      HOST_WIDE_INT t[WIDE_INT_MAX_ELTS];

      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = zext_hwi (v[len - 1], small_prec);
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);

  if (negative)
    mpz_com (result, result);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
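
/* Illustrative sketch, not part of the original file: at precision 8
   these routines produce the usual two's complement bounds,

     wi::max_value (8, UNSIGNED)  ->  0xff  (255)
     wi::max_value (8, SIGNED)    ->  0x7f  (127)
     wi::min_value (8, UNSIGNED)  ->  0x00  (0)
     wi::min_value (8, SIGNED)    ->  0x80  (-128, only the sign bit set)

   which is why the signed bounds must represent the top bit explicitly.  */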
/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed,
      unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
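
/* Illustrative sketch, not part of the original file: sign-extending
   the 8-bit value 0x80 upward from bit 8 within a 32-bit wide_int
   yields 0xffffff80, while zero-extending it (wi::zext_large, defined
   just below) yields 0x00000080:

     HOST_WIDE_INT v[1] = { 0x80 };
     HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
     wi::sext_large (r, v, 1, 32, 8);   // r[0] == -128 (0xffffff80)
     wi::zext_large (r, v, 1, 32, 8);   // r[0] == 0x80

   Assumes HOST_BITS_PER_WIDE_INT == 64, so each result is one block.  */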
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT);
  gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT);

  if (width == prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
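
/* Illustrative sketch, not part of the original file: at precision 32,
   wi::mask fills VAL with the familiar low-bit masks,

     width = 8,  negate = false  ->  0x000000ff
     width = 8,  negate = true   ->  0xffffff00
     width = 32, negate = false  ->  0xffffffff  (the width == prec case)

   so NEGATE selects between a low-ones and a low-zeros pattern.  */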
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift = end & (HOST_BITS_PER_WIDE_INT - 1);
      if (shift)
	{
	  /* case 000111000 */
	  block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
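
/* Illustrative sketch, not part of the original file: at precision 32,

     wi::shifted_mask (start = 8, width = 8, negate = false) -> 0x0000ff00
     wi::shifted_mask (start = 8, width = 8, negate = true)  -> 0xffff00ff

   This is the mask that wi::insert above uses to splice Y into X.  */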
/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/*
 * Math.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
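
/* Illustrative note, not part of the original file, on the signed
   overflow test above: overflow occurred iff both operands have the
   same sign and the sum's sign differs, i.e. iff the sign bit of
   (sum ^ o0) & (sum ^ o1) is set.  For example, with 8-bit operands:

     0x7f + 0x01 = 0x80:  (0x80 ^ 0x7f) & (0x80 ^ 0x01) = 0x81,
       top bit set, so overflow;
     0x7f + 0x80 = 0xff:  (0xff ^ 0x7f) & (0xff ^ 0x80) = 0x00,
       no overflow.

   The shift by -prec % HOST_BITS_PER_WIDE_INT moves that bit into the
   sign position of the HWI so it can be tested with < 0.  */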
/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
clz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}

/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
ctz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
	   const unsigned HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < in_len; i++)
    {
      HOST_WIDE_INT x = input[i];
      if (i == blocks_needed - 1 && small_prec)
	{
	  if (sgn == SIGNED)
	    x = sext_hwi (x, small_prec);
	  else
	    x = zext_hwi (x, small_prec);
	}

      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
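
/* Illustrative sketch, not part of the original file: with a 64-bit
   HOST_WIDE_INT (32-bit half-blocks), sign-extended unpacking of the
   single block { 0xffffffff00000005 } into four half-blocks gives

     { 0x00000005, 0xffffffff, 0xffffffff, 0xffffffff }

   low halves first, with the sign smeared into the padding.  wi_pack
   below performs the inverse transformation.  */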
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT) input[i]
	| ((unsigned HOST_WIDE_INT) input[i + 1]
	   << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT) input[i]
      | ((unsigned HOST_WIDE_INT) input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vrp.  */
  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, (const unsigned HOST_WIDE_INT *) op1val, op1len,
	     half_blocks_needed, prec, SIGNED);
  wi_unpack (v, (const unsigned HOST_WIDE_INT *) op2val, op2len,
	     half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT) u[i] * (unsigned HOST_WIDE_INT) v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT) r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT) v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT) r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT) u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[half_blocks_needed - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT) (r[i] & mask)) != top)
	  *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
	       &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSB of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}
/*
 * Division and Mod.
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   unsigned int m, unsigned int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;	/* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;	/* A remainder.  */
  unsigned HOST_WIDE_INT p;	/* Product of two digits.  */
  HOST_WIDE_INT s, i, j, t, k;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j]) / b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT) b_quotient[j]
		  * (unsigned HOST_WIDE_INT) b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT) b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
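
/* Illustrative note, not part of the original file: the "digits" above
   are base 2**HOST_BITS_PER_HALF_WIDE_INT, so on a 64-bit host this is
   schoolbook long division on 32-bit digits.  The normalization shift
   S makes the divisor's top digit at least b/2, which is what bounds
   the error in the estimate QHAT and guarantees the 'again' correction
   loop runs at most twice per quotient digit.  */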
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
	quotient[0] = o0 / o1;
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = 1;
	}
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, (const unsigned HOST_WIDE_INT *) dividend.get_val (),
	     dividend.get_len (), dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT *) divisor.get_val (),
	     divisor.get_len (), divisor_blocks_needed, divisor_prec, sgn);

  if (wi::neg_p (dividend, sgn))
    m = dividend_blocks_needed;
  else
    m = 2 * dividend.get_len ();

  if (wi::neg_p (divisor, sgn))
    n = divisor_blocks_needed;
  else
    n = 2 * divisor.get_len ();

  /* We need to find the top non zero block of b_divisor.  At most the
     top two blocks are zero.  */
  if (b_divisor[n - 1] == 0)
    n--;
  if (b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
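
/* Illustrative sketch, not part of the original file: shifting 1 left
   by 65 at precision 128 moves the set bit into the second block:

     HOST_WIDE_INT x[1] = { 1 };
     HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
     unsigned int len = wi::lshift_large (r, x, 1, 128, 65);
     // len == 2, r[0] == 0, r[1] == 2

   Assumes HOST_BITS_PER_WIDE_INT == 64.  */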
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero.  */
	  val[len++] = 0;
	  return len;
	}
    }

  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Check whether the value is zero.  */
  if (high == 0 && x.len == 1)
    return clz_zero (x.precision);

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
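
/* Illustrative sketch, not part of the original file: at precision 16,
   the value 0xff80 (-128) has eight bits below the sign bit that agree
   with it, so clrsb returns 8; the value 0x0001 has fourteen zero bits
   below its zero sign bit, so clrsb returns 14.  */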
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return ctz_zero (x.precision);

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
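
/* Illustrative sketch, not part of the original file: with a 64-bit
   HOST_WIDE_INT at precision 128, the value 2^70 is stored as the two
   blocks { 0, 0x40 }.  CRUX is 1, block 0 is checked to be zero, and
   the result is ::exact_log2 (0x40) + 64 == 70.  Any value with a
   second set bit yields -1.  */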
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
/*
 * Private debug printing routines.
 */
#ifdef DEBUG_WIDE_INT
/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
static char *
dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf)
{
  int i;
  unsigned int l;
  const char *sep = "";

  l = sprintf (buf, "[%d (", prec);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}
#endif
#if 0
/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
char *
wide_int_ro::dump (char *buf) const
{
  int i;
  unsigned int l;
  const char *sep = "";

  l = sprintf (buf, "[%d (", precision);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}
#endif