/* Operations with very long integers.
   Copyright (C) 2012-2014 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"

#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
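
/* Illustrative sketch (not part of the original file): how the helper
   macros above behave on a host with a 64-bit HOST_WIDE_INT.  Guarded
   out so it does not affect the build.  */
#if 0
static void
example_block_macros (void)
{
  /* 1..64 bits fit in one block, 65 bits need two, and precision 0 is
     treated as needing one block.  */
  gcc_checking_assert (BLOCKS_NEEDED (64) == 1);
  gcc_checking_assert (BLOCKS_NEEDED (65) == 2);
  gcc_checking_assert (BLOCKS_NEEDED (0) == 1);

  /* SIGN_MASK turns the sign of a block into an all-ones or all-zeros
     HOST_WIDE_INT, which is how implicit high blocks are materialized.  */
  gcc_checking_assert (SIGN_MASK ((HOST_WIDE_INT) -2) == -1);
  gcc_checking_assert (SIGN_MASK ((HOST_WIDE_INT) 2) == 0);
}
#endif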
/* Return the value of VAL[I] if I < LEN; otherwise return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block I does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
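
/* Illustrative sketch (not part of the original file), assuming a 64-bit
   HOST_WIDE_INT: canonize drops high blocks that merely repeat the sign
   extension of the block below them.  */
#if 0
static void
example_canonize (void)
{
  /* { 5, 0 } at 128 bits: the second block is just the zero extension
     of the first, so the canonical length is 1.  */
  HOST_WIDE_INT a[2] = { 5, 0 };
  gcc_checking_assert (canonize (a, 2, 128) == 1);

  /* { 5, -1 } at 128 bits: the second block is not the sign extension
     of the first, so both blocks are kept.  */
  HOST_WIDE_INT b[2] = { 5, -1 };
  gcc_checking_assert (canonize (b, 2, 128) == 2);
}
#endif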

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
   read according to byte endianess and word endianess of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}

/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* Use ones' complement to avoid the -0x80...0 edge case, which
         negation cannot represent.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof(HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);

  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);

  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
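
/* Illustrative sketch (not part of the original file): the extreme values
   at 8-bit precision.  wi::eq_p, wi::shwi and wi::uhwi come from
   wide-int.h.  */
#if 0
static void
example_extreme_values (void)
{
  /* Unsigned 8-bit range is 0..255; signed 8-bit range is -128..127.  */
  gcc_checking_assert (wi::eq_p (wi::max_value (8, UNSIGNED),
                                 wi::uhwi (255, 8)));
  gcc_checking_assert (wi::eq_p (wi::max_value (8, SIGNED),
                                 wi::shwi (127, 8)));
  gcc_checking_assert (wi::eq_p (wi::min_value (8, SIGNED),
                                 wi::shwi (-128, 8)));
}
#endif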

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;

      l--;
    }

  return false;
}
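
/* Illustrative sketch (not part of the original file): lts_p_large compares
   the top blocks as signed and the remaining blocks as unsigned, so with a
   64-bit HOST_WIDE_INT the single-block value -1 is less than 0 even at
   128-bit precision.  */
#if 0
static void
example_signed_compare (void)
{
  HOST_WIDE_INT minus_one[1] = { -1 };
  HOST_WIDE_INT zero[1] = { 0 };

  gcc_checking_assert (wi::lts_p_large (minus_one, 1, 128, zero, 1));
  gcc_checking_assert (!wi::lts_p_large (zero, 1, 128, minus_one, 1));
}
#endif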

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;

      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);

      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;

      l--;
    }

  return 0;
}

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
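
/* Illustrative sketch (not part of the original file): extending the
   8-bit pattern 0x80 inside a 32-bit wide value.  Sign extension from
   offset 8 smears the top bit; zero extension clears everything above
   it.  */
#if 0
static void
example_extension (void)
{
  HOST_WIDE_INT src[1] = { 0x80 };
  HOST_WIDE_INT dst[2];

  wi::sext_large (dst, src, 1, 32, 8);
  gcc_checking_assert (dst[0] == sext_hwi (0x80, 8));

  wi::zext_large (dst, src, 1, 32, 8);
  gcc_checking_assert (dst[0] == 0x80);
}
#endif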

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
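
/* Illustrative sketch (not part of the original file), assuming a 64-bit
   HOST_WIDE_INT: wi::mask fills the low WIDTH bits and wi::shifted_mask
   places WIDTH ones starting at START.  */
#if 0
static void
example_masks (void)
{
  HOST_WIDE_INT val[2];

  /* Low 4 bits of a 32-bit value: 0xf.  */
  gcc_checking_assert (wi::mask (val, 4, false, 32) == 1
                       && val[0] == 0xf);

  /* Bits 4..11 of a 32-bit value: 0xff0.  */
  gcc_checking_assert (wi::shifted_mask (val, 4, 8, false, 32) == 1
                       && val[0] == 0xff0);
}
#endif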

/*
 * logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * math
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
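
/* Illustrative sketch (not part of the original file), assuming a 64-bit
   HOST_WIDE_INT: the carry out of the low block either lands in the next
   block (when the precision has room for it) or is reported as an
   unsigned overflow.  */
#if 0
static void
example_add_overflow (void)
{
  HOST_WIDE_INT all_ones[1] = { -1 };        /* 0xffff...ff.  */
  HOST_WIDE_INT low_ones[2] = { -1, 0 };     /* 2^64 - 1 at 128 bits.  */
  HOST_WIDE_INT one[1] = { 1 };
  HOST_WIDE_INT res[3];
  bool ovf;

  /* 64-bit precision: the addition wraps to 0 and overflow is set.  */
  wi::add_large (res, all_ones, 1, one, 1, 64, UNSIGNED, &ovf);
  gcc_checking_assert (res[0] == 0 && ovf);

  /* 128-bit precision: the carry propagates into the second block.  */
  unsigned int len = wi::add_large (res, low_ones, 2, one, 1, 128,
                                    UNSIGNED, &ovf);
  gcc_checking_assert (len == 2 && res[0] == 0 && res[1] == 1 && !ovf);
}
#endif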

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT) input[i]
        | ((unsigned HOST_WIDE_INT) input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT) input[i]
      | ((unsigned HOST_WIDE_INT) input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
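
/* Illustrative sketch (not part of the original file), assuming a 64-bit
   HOST_WIDE_INT and 32-bit half blocks: wi_unpack splits each block into
   two half-wide digits (low half first) and wi_pack reassembles them.  */
#if 0
static void
example_unpack_pack (void)
{
  HOST_WIDE_INT x[1] = { (HOST_WIDE_INT) 0x1234567890abcdefLL };
  unsigned HOST_HALF_WIDE_INT halves[4];
  unsigned HOST_WIDE_INT packed[2];

  wi_unpack (halves, x, 1, 4, 128, UNSIGNED);
  gcc_checking_assert (halves[0] == 0x90abcdef && halves[1] == 0x12345678);
  /* The value is zero-extended to the requested four half-blocks.  */
  gcc_checking_assert (halves[2] == 0 && halves[3] == 0);

  wi_pack (packed, halves, 4);
  gcc_checking_assert (packed[0] == (unsigned HOST_WIDE_INT) x[0]);
}
#endif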

/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          if (high)
            val[0] = upper;
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}

/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
     algorithm, we can overwrite b_dividend and b_divisor, so we do
     that.  */
  for (i = n - 1; i > 0; i--)
    b_divisor[i] = (b_divisor[i] << s)
      | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
  b_divisor[0] = b_divisor[0] << s;

  b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
  for (i = m - 1; i > 0; i--)
    b_dividend[i] = (b_dividend[i] << s)
      | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
  b_dividend[0] = b_dividend[0] << s;

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }

  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}

/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
        quotient[0] = o0 / o1;
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = 1;
        }
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}

/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
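
/* Illustrative sketch (not part of the original file), assuming a 64-bit
   HOST_WIDE_INT: a left shift by 68 at 128-bit precision is one
   whole-block shift (64) plus a subblock shift of 4, so the low input
   block ends up in the high output block, shifted by 4.  */
#if 0
static void
example_lshift (void)
{
  HOST_WIDE_INT x[1] = { 3 };
  HOST_WIDE_INT res[2];

  unsigned int len = wi::lshift_large (res, x, 1, 128, 68);
  gcc_checking_assert (len == 2 && res[0] == 0 && res[1] == (3 << 4));
}
#endif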

/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}

/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero. */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}

/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}

/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}

/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}

/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
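
/* Illustrative sketch (not part of the original file): for the 32-bit
   value 0x0f00 there are 20 leading zeros and 8 trailing zeros.
   wi::shwi comes from wide-int.h.  */
#if 0
static void
example_count_zeros (void)
{
  wide_int x = wi::shwi (0x0f00, 32);
  gcc_checking_assert (wi::clz (x) == 20);
  gcc_checking_assert (wi::ctz (x) == 8);
}
#endif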

/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}

/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}

/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;