1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
33 #define HOST_BITS_PER_HALF_WIDE_INT 32
34 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
35 # define HOST_HALF_WIDE_INT long
36 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
37 # define HOST_HALF_WIDE_INT int
39 #error Please add support for HOST_HALF_WIDE_INT
42 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
43 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
44 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
45 typedef unsigned HOST_HALF_WIDE_INT UHWtype
;
46 typedef unsigned HOST_WIDE_INT UWtype
;
47 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
48 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
49 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
51 typedef unsigned int UDWtype
__attribute__ ((mode (DI
)));
53 typedef unsigned int UDWtype
__attribute__ ((mode (TI
)));
58 static const HOST_WIDE_INT zeros
[WIDE_INT_MAX_ELTS
] = {};
64 /* Quantities to deal with values that hold half of a wide int. Used
65 in multiply and divide. */
66 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
68 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
69 #define BLOCKS_NEEDED(PREC) \
70 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
71 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
73 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
74 based on the top existing bit of VAL. */
76 static unsigned HOST_WIDE_INT
77 safe_uhwi (const HOST_WIDE_INT
*val
, unsigned int len
, unsigned int i
)
79 return i
< len
? val
[i
] : val
[len
- 1] < 0 ? (HOST_WIDE_INT
) -1 : 0;
82 /* Convert the integer in VAL to canonical form, returning its new length.
83 LEN is the number of blocks currently in VAL and PRECISION is the number
84 of bits in the integer it represents.
86 This function only changes the representation, not the value. */
88 canonize (HOST_WIDE_INT
*val
, unsigned int len
, unsigned int precision
)
90 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
94 if (len
> blocks_needed
)
101 if (len
* HOST_BITS_PER_WIDE_INT
> precision
)
102 val
[len
- 1] = top
= sext_hwi (top
, precision
% HOST_BITS_PER_WIDE_INT
);
103 if (top
!= 0 && top
!= (HOST_WIDE_INT
)-1)
106 /* At this point we know that the top is either 0 or -1. Find the
107 first block that is not a copy of this. */
108 for (i
= len
- 2; i
>= 0; i
--)
110 HOST_WIDE_INT x
= val
[i
];
113 if (SIGN_MASK (x
) == top
)
116 /* We need an extra block because the top bit block i does
117 not match the extension. */
122 /* The number is 0 or -1. */
127 * Conversion routines in and out of wide_int.
130 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
131 result for an integer with precision PRECISION. Return the length
132 of VAL (after any canonization. */
134 wi::from_array (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
135 unsigned int xlen
, unsigned int precision
, bool need_canon
)
137 for (unsigned i
= 0; i
< xlen
; i
++)
139 return need_canon
? canonize (val
, xlen
, precision
) : xlen
;
142 /* Construct a wide int from a buffer of length LEN. BUFFER will be
143 read according to byte endianess and word endianess of the target.
144 Only the lower BUFFER_LEN bytes of the result are set; the remaining
145 high bytes are cleared. */
147 wi::from_buffer (const unsigned char *buffer
, unsigned int buffer_len
)
149 unsigned int precision
= buffer_len
* BITS_PER_UNIT
;
150 wide_int result
= wide_int::create (precision
);
151 unsigned int words
= buffer_len
/ UNITS_PER_WORD
;
153 /* We have to clear all the bits ourself, as we merely or in values
155 unsigned int len
= BLOCKS_NEEDED (precision
);
156 HOST_WIDE_INT
*val
= result
.write_val ();
157 for (unsigned int i
= 0; i
< len
; ++i
)
160 for (unsigned int byte
= 0; byte
< buffer_len
; byte
++)
164 unsigned int bitpos
= byte
* BITS_PER_UNIT
;
165 unsigned HOST_WIDE_INT value
;
167 if (buffer_len
> UNITS_PER_WORD
)
169 unsigned int word
= byte
/ UNITS_PER_WORD
;
171 if (WORDS_BIG_ENDIAN
)
172 word
= (words
- 1) - word
;
174 offset
= word
* UNITS_PER_WORD
;
176 if (BYTES_BIG_ENDIAN
)
177 offset
+= (UNITS_PER_WORD
- 1) - (byte
% UNITS_PER_WORD
);
179 offset
+= byte
% UNITS_PER_WORD
;
182 offset
= BYTES_BIG_ENDIAN
? (buffer_len
- 1) - byte
: byte
;
184 value
= (unsigned HOST_WIDE_INT
) buffer
[offset
];
186 index
= bitpos
/ HOST_BITS_PER_WIDE_INT
;
187 val
[index
] |= value
<< (bitpos
% HOST_BITS_PER_WIDE_INT
);
190 result
.set_len (canonize (val
, len
, precision
));
195 /* Sets RESULT from X, the sign is taken according to SGN. */
197 wi::to_mpz (const wide_int_ref
&x
, mpz_t result
, signop sgn
)
199 int len
= x
.get_len ();
200 const HOST_WIDE_INT
*v
= x
.get_val ();
201 int excess
= len
* HOST_BITS_PER_WIDE_INT
- x
.get_precision ();
203 if (wi::neg_p (x
, sgn
))
205 /* We use ones complement to avoid -x80..0 edge case that -
207 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
208 for (int i
= 0; i
< len
; i
++)
211 t
[len
- 1] = (unsigned HOST_WIDE_INT
) t
[len
- 1] << excess
>> excess
;
212 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
213 mpz_com (result
, result
);
217 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
218 for (int i
= 0; i
< len
- 1; i
++)
220 t
[len
- 1] = (unsigned HOST_WIDE_INT
) v
[len
- 1] << excess
>> excess
;
221 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
224 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, v
);
227 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
228 values of VAL will be wrapped; otherwise, they will be set to the
229 appropriate minimum or maximum TYPE bound. */
231 wi::from_mpz (const_tree type
, mpz_t x
, bool wrap
)
234 unsigned int prec
= TYPE_PRECISION (type
);
235 wide_int res
= wide_int::create (prec
);
243 get_type_static_bounds (type
, min
, max
);
245 if (mpz_cmp (x
, min
) < 0)
247 else if (mpz_cmp (x
, max
) > 0)
254 /* Determine the number of unsigned HOST_WIDE_INTs that are required
255 for representing the value. The code to calculate count is
256 extracted from the GMP manual, section "Integer Import and Export":
257 http://gmplib.org/manual/Integer-Import-and-Export.html */
258 numb
= CHAR_BIT
* sizeof (HOST_WIDE_INT
);
259 count
= (mpz_sizeinbase (x
, 2) + numb
- 1) / numb
;
260 HOST_WIDE_INT
*val
= res
.write_val ();
261 /* Write directly to the wide_int storage if possible, otherwise leave
262 GMP to allocate the memory for us. It might be slightly more efficient
263 to use mpz_tdiv_r_2exp for the latter case, but the situation is
264 pathological and it seems safer to operate on the original mpz value
266 void *valres
= mpz_export (count
<= WIDE_INT_MAX_ELTS
? val
: 0,
267 &count
, -1, sizeof (HOST_WIDE_INT
), 0, 0, x
);
273 count
= MIN (count
, BLOCKS_NEEDED (prec
));
276 memcpy (val
, valres
, count
* sizeof (HOST_WIDE_INT
));
279 res
.set_len (canonize (val
, count
, prec
));
288 * Largest and smallest values in a mode.
291 /* Return the largest SGNed number that is representable in PRECISION bits.
293 TODO: There is still code from the double_int era that trys to
294 make up for the fact that double int's could not represent the
295 min and max values of all types. This code should be removed
296 because the min and max values can always be represented in
297 wide_ints and int-csts. */
299 wi::max_value (unsigned int precision
, signop sgn
)
301 gcc_checking_assert (precision
!= 0);
303 /* The unsigned max is just all ones. */
304 return shwi (-1, precision
);
306 /* The signed max is all ones except the top bit. This must be
307 explicitly represented. */
308 return mask (precision
- 1, false, precision
);
311 /* Return the largest SGNed number that is representable in PRECISION bits. */
313 wi::min_value (unsigned int precision
, signop sgn
)
315 gcc_checking_assert (precision
!= 0);
317 return uhwi (0, precision
);
319 /* The signed min is all zeros except the top bit. This must be
320 explicitly represented. */
321 return wi::set_bit_in_zero (precision
- 1, precision
);
328 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
329 signedness SGN, to an integer that has PRECISION bits. Store the blocks
330 in VAL and return the number of blocks used.
332 This function can handle both extension (PRECISION > XPRECISION)
333 and truncation (PRECISION < XPRECISION). */
335 wi::force_to_size (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
336 unsigned int xlen
, unsigned int xprecision
,
337 unsigned int precision
, signop sgn
)
339 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
340 unsigned int len
= blocks_needed
< xlen
? blocks_needed
: xlen
;
341 for (unsigned i
= 0; i
< len
; i
++)
344 if (precision
> xprecision
)
346 unsigned int small_xprecision
= xprecision
% HOST_BITS_PER_WIDE_INT
;
351 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
352 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
353 else if (val
[len
- 1] < 0)
355 while (len
< BLOCKS_NEEDED (xprecision
))
357 if (small_xprecision
)
358 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
365 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
366 val
[len
- 1] = sext_hwi (val
[len
- 1], small_xprecision
);
369 len
= canonize (val
, len
, precision
);
374 /* This function hides the fact that we cannot rely on the bits beyond
375 the precision. This issue comes up in the relational comparisions
376 where we do allow comparisons of values of different precisions. */
377 static inline HOST_WIDE_INT
378 selt (const HOST_WIDE_INT
*a
, unsigned int len
,
379 unsigned int blocks_needed
, unsigned int small_prec
,
380 unsigned int index
, signop sgn
)
385 else if (index
< blocks_needed
|| sgn
== SIGNED
)
386 /* Signed or within the precision. */
387 val
= SIGN_MASK (a
[len
- 1]);
389 /* Unsigned extension beyond the precision. */
392 if (small_prec
&& index
== blocks_needed
- 1)
393 return (sgn
== SIGNED
394 ? sext_hwi (val
, small_prec
)
395 : zext_hwi (val
, small_prec
));
400 /* Find the highest bit represented in a wide int. This will in
401 general have the same value as the sign bit. */
402 static inline HOST_WIDE_INT
403 top_bit_of (const HOST_WIDE_INT
*a
, unsigned int len
, unsigned int prec
)
405 int excess
= len
* HOST_BITS_PER_WIDE_INT
- prec
;
406 unsigned HOST_WIDE_INT val
= a
[len
- 1];
409 return val
>> (HOST_BITS_PER_WIDE_INT
- 1);
413 * Comparisons, note that only equality is an operator. The other
414 * comparisons cannot be operators since they are inherently signed or
415 * unsigned and C++ has no such operators.
418 /* Return true if OP0 == OP1. */
420 wi::eq_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
421 const HOST_WIDE_INT
*op1
, unsigned int op1len
,
425 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
427 if (op0len
!= op1len
)
430 if (op0len
== BLOCKS_NEEDED (prec
) && small_prec
)
432 /* It does not matter if we zext or sext here, we just have to
433 do both the same way. */
434 if (zext_hwi (op0
[l0
], small_prec
) != zext_hwi (op1
[l0
], small_prec
))
440 if (op0
[l0
] != op1
[l0
])
448 /* Return true if OP0 < OP1 using signed comparisons. */
450 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
451 unsigned int precision
,
452 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
454 HOST_WIDE_INT s0
, s1
;
455 unsigned HOST_WIDE_INT u0
, u1
;
456 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
457 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
458 int l
= MAX (op0len
- 1, op1len
- 1);
460 /* Only the top block is compared as signed. The rest are unsigned
462 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
463 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
472 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
473 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
485 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
488 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
489 unsigned int precision
,
490 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
492 HOST_WIDE_INT s0
, s1
;
493 unsigned HOST_WIDE_INT u0
, u1
;
494 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
495 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
496 int l
= MAX (op0len
- 1, op1len
- 1);
498 /* Only the top block is compared as signed. The rest are unsigned
500 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
501 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
510 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
511 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
523 /* Return true if OP0 < OP1 using unsigned comparisons. */
525 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
526 unsigned int precision
,
527 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
529 unsigned HOST_WIDE_INT x0
;
530 unsigned HOST_WIDE_INT x1
;
531 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
532 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
533 int l
= MAX (op0len
- 1, op1len
- 1);
537 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
538 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
549 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
550 unsigned compares. */
552 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
553 unsigned int precision
,
554 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
556 unsigned HOST_WIDE_INT x0
;
557 unsigned HOST_WIDE_INT x1
;
558 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
559 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
560 int l
= MAX (op0len
- 1, op1len
- 1);
564 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
565 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
580 /* Sign-extend the number represented by XVAL and XLEN into VAL,
581 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
582 and VAL have PRECISION bits. */
584 wi::sext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
585 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
587 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
588 /* Extending beyond the precision is a no-op. If we have only stored
589 OFFSET bits or fewer, the rest are already signs. */
590 if (offset
>= precision
|| len
>= xlen
)
592 for (unsigned i
= 0; i
< xlen
; ++i
)
596 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
597 for (unsigned int i
= 0; i
< len
; i
++)
601 val
[len
] = sext_hwi (xval
[len
], suboffset
);
604 return canonize (val
, len
, precision
);
607 /* Zero-extend the number represented by XVAL and XLEN into VAL,
608 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
609 and VAL have PRECISION bits. */
611 wi::zext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
612 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
614 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
615 /* Extending beyond the precision is a no-op. If we have only stored
616 OFFSET bits or fewer, and the upper stored bit is zero, then there
618 if (offset
>= precision
|| (len
>= xlen
&& xval
[xlen
- 1] >= 0))
620 for (unsigned i
= 0; i
< xlen
; ++i
)
624 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
625 for (unsigned int i
= 0; i
< len
; i
++)
626 val
[i
] = i
< xlen
? xval
[i
] : -1;
628 val
[len
] = zext_hwi (len
< xlen
? xval
[len
] : -1, suboffset
);
631 return canonize (val
, len
+ 1, precision
);
635 * Masking, inserting, shifting, rotating.
638 /* Insert WIDTH bits from Y into X starting at START. */
640 wi::insert (const wide_int
&x
, const wide_int
&y
, unsigned int start
,
647 unsigned int precision
= x
.get_precision ();
648 if (start
>= precision
)
651 gcc_checking_assert (precision
>= width
);
653 if (start
+ width
>= precision
)
654 width
= precision
- start
;
656 mask
= wi::shifted_mask (start
, width
, false, precision
);
657 tmp
= wi::lshift (wide_int::from (y
, precision
, UNSIGNED
), start
);
660 tmp
= wi::bit_and_not (x
, mask
);
661 result
= result
| tmp
;
666 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
667 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
670 wi::set_bit_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
671 unsigned int xlen
, unsigned int precision
, unsigned int bit
)
673 unsigned int block
= bit
/ HOST_BITS_PER_WIDE_INT
;
674 unsigned int subbit
= bit
% HOST_BITS_PER_WIDE_INT
;
676 if (block
+ 1 >= xlen
)
678 /* The operation either affects the last current block or needs
680 unsigned int len
= block
+ 1;
681 for (unsigned int i
= 0; i
< len
; i
++)
682 val
[i
] = safe_uhwi (xval
, xlen
, i
);
683 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
685 /* If the bit we just set is at the msb of the block, make sure
686 that any higher bits are zeros. */
687 if (bit
+ 1 < precision
&& subbit
== HOST_BITS_PER_WIDE_INT
- 1)
693 for (unsigned int i
= 0; i
< xlen
; i
++)
695 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
696 return canonize (val
, xlen
, precision
);
702 wide_int_storage::bswap () const
704 wide_int result
= wide_int::create (precision
);
706 unsigned int len
= BLOCKS_NEEDED (precision
);
707 unsigned int xlen
= get_len ();
708 const HOST_WIDE_INT
*xval
= get_val ();
709 HOST_WIDE_INT
*val
= result
.write_val ();
711 /* This is not a well defined operation if the precision is not a
713 gcc_assert ((precision
& 0x7) == 0);
715 for (i
= 0; i
< len
; i
++)
718 /* Only swap the bytes that are not the padding. */
719 for (s
= 0; s
< precision
; s
+= 8)
721 unsigned int d
= precision
- s
- 8;
722 unsigned HOST_WIDE_INT byte
;
724 unsigned int block
= s
/ HOST_BITS_PER_WIDE_INT
;
725 unsigned int offset
= s
& (HOST_BITS_PER_WIDE_INT
- 1);
727 byte
= (safe_uhwi (xval
, xlen
, block
) >> offset
) & 0xff;
729 block
= d
/ HOST_BITS_PER_WIDE_INT
;
730 offset
= d
& (HOST_BITS_PER_WIDE_INT
- 1);
732 val
[block
] |= byte
<< offset
;
735 result
.set_len (canonize (val
, len
, precision
));
739 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
740 above that up to PREC are zeros. The result is inverted if NEGATE
741 is true. Return the number of blocks in VAL. */
743 wi::mask (HOST_WIDE_INT
*val
, unsigned int width
, bool negate
,
748 val
[0] = negate
? 0 : -1;
753 val
[0] = negate
? -1 : 0;
758 while (i
< width
/ HOST_BITS_PER_WIDE_INT
)
759 val
[i
++] = negate
? 0 : -1;
761 unsigned int shift
= width
& (HOST_BITS_PER_WIDE_INT
- 1);
764 HOST_WIDE_INT last
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
765 val
[i
++] = negate
? ~last
: last
;
768 val
[i
++] = negate
? -1 : 0;
773 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
774 bits are ones, and the bits above that up to PREC are zeros. The result
775 is inverted if NEGATE is true. Return the number of blocks in VAL. */
777 wi::shifted_mask (HOST_WIDE_INT
*val
, unsigned int start
, unsigned int width
,
778 bool negate
, unsigned int prec
)
780 if (start
>= prec
|| width
== 0)
782 val
[0] = negate
? -1 : 0;
786 if (width
> prec
- start
)
787 width
= prec
- start
;
788 unsigned int end
= start
+ width
;
791 while (i
< start
/ HOST_BITS_PER_WIDE_INT
)
792 val
[i
++] = negate
? -1 : 0;
794 unsigned int shift
= start
& (HOST_BITS_PER_WIDE_INT
- 1);
797 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
799 if (shift
< HOST_BITS_PER_WIDE_INT
)
802 block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - block
- 1;
803 val
[i
++] = negate
? ~block
: block
;
808 val
[i
++] = negate
? block
: ~block
;
811 while (i
< end
/ HOST_BITS_PER_WIDE_INT
)
813 val
[i
++] = negate
? 0 : -1;
815 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
819 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
820 val
[i
++] = negate
? ~block
: block
;
823 val
[i
++] = negate
? -1 : 0;
829 * logical operations.
832 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
834 wi::and_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
835 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
836 unsigned int op1len
, unsigned int prec
)
840 bool need_canon
= true;
842 unsigned int len
= MAX (op0len
, op1len
);
845 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
863 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
879 val
[l0
] = op0
[l0
] & op1
[l0
];
884 len
= canonize (val
, len
, prec
);
889 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
891 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
892 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
893 unsigned int op1len
, unsigned int prec
)
898 bool need_canon
= true;
900 unsigned int len
= MAX (op0len
, op1len
);
903 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
921 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
937 val
[l0
] = op0
[l0
] & ~op1
[l0
];
942 len
= canonize (val
, len
, prec
);
947 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
949 wi::or_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
950 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
951 unsigned int op1len
, unsigned int prec
)
956 bool need_canon
= true;
958 unsigned int len
= MAX (op0len
, op1len
);
961 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
979 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
995 val
[l0
] = op0
[l0
] | op1
[l0
];
1000 len
= canonize (val
, len
, prec
);
1005 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1007 wi::or_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1008 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1009 unsigned int op1len
, unsigned int prec
)
1012 int l0
= op0len
- 1;
1013 int l1
= op1len
- 1;
1014 bool need_canon
= true;
1016 unsigned int len
= MAX (op0len
, op1len
);
1019 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1037 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1053 val
[l0
] = op0
[l0
] | ~op1
[l0
];
1058 len
= canonize (val
, len
, prec
);
1063 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1065 wi::xor_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1066 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1067 unsigned int op1len
, unsigned int prec
)
1070 int l0
= op0len
- 1;
1071 int l1
= op1len
- 1;
1073 unsigned int len
= MAX (op0len
, op1len
);
1076 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1079 val
[l0
] = op0
[l0
] ^ op1mask
;
1086 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1089 val
[l1
] = op0mask
^ op1
[l1
];
1096 val
[l0
] = op0
[l0
] ^ op1
[l0
];
1100 return canonize (val
, len
, prec
);
1107 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1108 whether the result overflows when OP0 and OP1 are treated as having
1109 signedness SGN. Return the number of blocks in VAL. */
1111 wi::add_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1112 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1113 unsigned int op1len
, unsigned int prec
,
1114 signop sgn
, bool *overflow
)
1116 unsigned HOST_WIDE_INT o0
= 0;
1117 unsigned HOST_WIDE_INT o1
= 0;
1118 unsigned HOST_WIDE_INT x
= 0;
1119 unsigned HOST_WIDE_INT carry
= 0;
1120 unsigned HOST_WIDE_INT old_carry
= 0;
1121 unsigned HOST_WIDE_INT mask0
, mask1
;
1124 unsigned int len
= MAX (op0len
, op1len
);
1125 mask0
= -top_bit_of (op0
, op0len
, prec
);
1126 mask1
= -top_bit_of (op1
, op1len
, prec
);
1127 /* Add all of the explicitly defined elements. */
1129 for (i
= 0; i
< len
; i
++)
1131 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
) op0
[i
] : mask0
;
1132 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
) op1
[i
] : mask1
;
1133 x
= o0
+ o1
+ carry
;
1136 carry
= carry
== 0 ? x
< o0
: x
<= o0
;
1139 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1141 val
[len
] = mask0
+ mask1
+ carry
;
1148 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1151 unsigned HOST_WIDE_INT x
= (val
[len
- 1] ^ o0
) & (val
[len
- 1] ^ o1
);
1152 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1156 /* Put the MSB of X and O0 and in the top of the HWI. */
1160 *overflow
= (x
<= o0
);
1162 *overflow
= (x
< o0
);
1166 return canonize (val
, len
, prec
);
1169 /* Subroutines of the multiplication and division operations. Unpack
1170 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1171 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1172 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1174 wi_unpack (unsigned HOST_HALF_WIDE_INT
*result
, const HOST_WIDE_INT
*input
,
1175 unsigned int in_len
, unsigned int out_len
,
1176 unsigned int prec
, signop sgn
)
1180 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
1181 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1186 mask
= -top_bit_of ((const HOST_WIDE_INT
*) input
, in_len
, prec
);
1187 mask
&= HALF_INT_MASK
;
1192 for (i
= 0; i
< blocks_needed
- 1; i
++)
1194 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1196 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1199 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1203 x
= sext_hwi (x
, small_prec
);
1205 x
= zext_hwi (x
, small_prec
);
1208 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1210 /* Smear the sign bit. */
1215 /* The inverse of wi_unpack. IN_LEN is the the number of input
1216 blocks. The number of output blocks will be half this amount. */
1218 wi_pack (unsigned HOST_WIDE_INT
*result
,
1219 const unsigned HOST_HALF_WIDE_INT
*input
,
1220 unsigned int in_len
)
1225 while (i
+ 2 < in_len
)
1227 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1228 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1]
1229 << HOST_BITS_PER_HALF_WIDE_INT
);
1233 /* Handle the case where in_len is odd. For this we zero extend. */
1235 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
];
1237 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1238 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1] << HOST_BITS_PER_HALF_WIDE_INT
);
1241 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1244 If HIGH is not set, throw away the upper half after the check is
1245 made to see if it overflows. Unfortunately there is no better way
1246 to check for overflow than to do this. If OVERFLOW is nonnull,
1247 record in *OVERFLOW whether the result overflowed. SGN controls
1248 the signedness and is used to check overflow or if HIGH is set. */
1250 wi::mul_internal (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op1val
,
1251 unsigned int op1len
, const HOST_WIDE_INT
*op2val
,
1252 unsigned int op2len
, unsigned int prec
, signop sgn
,
1253 bool *overflow
, bool high
)
1255 unsigned HOST_WIDE_INT o0
, o1
, k
, t
;
1258 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1259 unsigned int half_blocks_needed
= blocks_needed
* 2;
1260 /* The sizes here are scaled to support a 2x largest mode by 2x
1261 largest mode yielding a 4x largest mode result. This is what is
1264 unsigned HOST_HALF_WIDE_INT
1265 u
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1266 unsigned HOST_HALF_WIDE_INT
1267 v
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1268 /* The '2' in 'R' is because we are internally doing a full
1270 unsigned HOST_HALF_WIDE_INT
1271 r
[2 * 4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1272 HOST_WIDE_INT mask
= ((HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
) - 1;
1274 /* If the top level routine did not really pass in an overflow, then
1275 just make sure that we never attempt to set it. */
1276 bool needs_overflow
= (overflow
!= 0);
1280 wide_int_ref op1
= wi::storage_ref (op1val
, op1len
, prec
);
1281 wide_int_ref op2
= wi::storage_ref (op2val
, op2len
, prec
);
1283 /* This is a surprisingly common case, so do it first. */
1284 if (op1
== 0 || op2
== 0)
1291 if (sgn
== UNSIGNED
)
1293 /* If the inputs are single HWIs and the output has room for at
1294 least two HWIs, we can use umul_ppmm directly. */
1295 if (prec
>= HOST_BITS_PER_WIDE_INT
* 2
1296 && wi::fits_uhwi_p (op1
)
1297 && wi::fits_uhwi_p (op2
))
1299 /* This case never overflows. */
1305 umul_ppmm (val
[1], val
[0], op1
.ulow (), op2
.ulow ());
1306 if (val
[1] < 0 && prec
> HOST_BITS_PER_WIDE_INT
* 2)
1311 return 1 + (val
[1] != 0 || val
[0] < 0);
1313 /* Likewise if the output is a full single HWI, except that the
1314 upper HWI of the result is only used for determining overflow.
1315 (We handle this case inline when overflow isn't needed.) */
1316 else if (prec
== HOST_BITS_PER_WIDE_INT
)
1318 unsigned HOST_WIDE_INT upper
;
1319 umul_ppmm (upper
, val
[0], op1
.ulow (), op2
.ulow ());
1321 *overflow
= (upper
!= 0);
1329 /* Handle multiplications by 1. */
1334 val
[0] = wi::neg_p (op2
, sgn
) ? -1 : 0;
1337 for (i
= 0; i
< op2len
; i
++)
1345 val
[0] = wi::neg_p (op1
, sgn
) ? -1 : 0;
1348 for (i
= 0; i
< op1len
; i
++)
1353 /* If we need to check for overflow, we can only do half wide
1354 multiplies quickly because we need to look at the top bits to
1355 check for the overflow. */
1356 if ((high
|| needs_overflow
)
1357 && (prec
<= HOST_BITS_PER_HALF_WIDE_INT
))
1359 unsigned HOST_WIDE_INT r
;
1363 o0
= op1
.to_shwi ();
1364 o1
= op2
.to_shwi ();
1368 o0
= op1
.to_uhwi ();
1369 o1
= op2
.to_uhwi ();
1377 if ((HOST_WIDE_INT
) r
!= sext_hwi (r
, prec
))
1382 if ((r
>> prec
) != 0)
1386 val
[0] = high
? r
>> prec
: r
;
1390 /* We do unsigned mul and then correct it. */
1391 wi_unpack (u
, op1val
, op1len
, half_blocks_needed
, prec
, SIGNED
);
1392 wi_unpack (v
, op2val
, op2len
, half_blocks_needed
, prec
, SIGNED
);
1394 /* The 2 is for a full mult. */
1395 memset (r
, 0, half_blocks_needed
* 2
1396 * HOST_BITS_PER_HALF_WIDE_INT
/ CHAR_BIT
);
1398 for (j
= 0; j
< half_blocks_needed
; j
++)
1401 for (i
= 0; i
< half_blocks_needed
; i
++)
1403 t
= ((unsigned HOST_WIDE_INT
)u
[i
] * (unsigned HOST_WIDE_INT
)v
[j
]
1405 r
[i
+ j
] = t
& HALF_INT_MASK
;
1406 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1408 r
[j
+ half_blocks_needed
] = k
;
1411 /* We did unsigned math above. For signed we must adjust the
1412 product (assuming we need to see that). */
1413 if (sgn
== SIGNED
&& (high
|| needs_overflow
))
1415 unsigned HOST_WIDE_INT b
;
1416 if (wi::neg_p (op1
))
1419 for (i
= 0; i
< half_blocks_needed
; i
++)
1421 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1422 - (unsigned HOST_WIDE_INT
)v
[i
] - b
;
1423 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1424 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1427 if (wi::neg_p (op2
))
1430 for (i
= 0; i
< half_blocks_needed
; i
++)
1432 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1433 - (unsigned HOST_WIDE_INT
)u
[i
] - b
;
1434 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1435 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1444 /* For unsigned, overflow is true if any of the top bits are set.
1445 For signed, overflow is true if any of the top bits are not equal
1447 if (sgn
== UNSIGNED
)
1451 top
= r
[(half_blocks_needed
) - 1];
1452 top
= SIGN_MASK (top
<< (HOST_BITS_PER_WIDE_INT
/ 2));
1456 for (i
= half_blocks_needed
; i
< half_blocks_needed
* 2; i
++)
1457 if (((HOST_WIDE_INT
)(r
[i
] & mask
)) != top
)
1463 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1464 wi_pack ((unsigned HOST_WIDE_INT
*) val
,
1465 &r
[half_blocks_needed
], half_blocks_needed
);
1466 return canonize (val
, blocks_needed
, prec
);
1470 /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
1471 wi_pack ((unsigned HOST_WIDE_INT
*) val
, r
, half_blocks_needed
);
1472 return canonize (val
, blocks_needed
, prec
);
1476 /* Compute the population count of X. */
1478 wi::popcount (const wide_int_ref
&x
)
1483 /* The high order block is special if it is the last block and the
1484 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1485 have to clear out any ones above the precision before doing
1486 popcount on this block. */
1487 count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1488 unsigned int stop
= x
.len
;
1491 count
= popcount_hwi (x
.uhigh () << -count
);
1496 if (x
.sign_mask () >= 0)
1500 for (i
= 0; i
< stop
; ++i
)
1501 count
+= popcount_hwi (x
.val
[i
]);
1506 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1507 whether the result overflows when OP0 and OP1 are treated as having
1508 signedness SGN. Return the number of blocks in VAL. */
1510 wi::sub_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1511 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1512 unsigned int op1len
, unsigned int prec
,
1513 signop sgn
, bool *overflow
)
1515 unsigned HOST_WIDE_INT o0
= 0;
1516 unsigned HOST_WIDE_INT o1
= 0;
1517 unsigned HOST_WIDE_INT x
= 0;
1518 /* We implement subtraction as an in place negate and add. Negation
1519 is just inversion and add 1, so we can do the add of 1 by just
1520 starting the borrow in of the first element at 1. */
1521 unsigned HOST_WIDE_INT borrow
= 0;
1522 unsigned HOST_WIDE_INT old_borrow
= 0;
1524 unsigned HOST_WIDE_INT mask0
, mask1
;
1527 unsigned int len
= MAX (op0len
, op1len
);
1528 mask0
= -top_bit_of (op0
, op0len
, prec
);
1529 mask1
= -top_bit_of (op1
, op1len
, prec
);
1531 /* Subtract all of the explicitly defined elements. */
1532 for (i
= 0; i
< len
; i
++)
1534 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
)op0
[i
] : mask0
;
1535 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
)op1
[i
] : mask1
;
1536 x
= o0
- o1
- borrow
;
1538 old_borrow
= borrow
;
1539 borrow
= borrow
== 0 ? o0
< o1
: o0
<= o1
;
1542 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1544 val
[len
] = mask0
- mask1
- borrow
;
1551 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1554 unsigned HOST_WIDE_INT x
= (o0
^ o1
) & (val
[len
- 1] ^ o0
);
1555 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1559 /* Put the MSB of X and O0 and in the top of the HWI. */
1563 *overflow
= (x
>= o0
);
1565 *overflow
= (x
> o0
);
1569 return canonize (val
, len
, prec
);
1577 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1578 algorithm is a small modification of the algorithm in Hacker's
1579 Delight by Warren, which itself is a small modification of Knuth's
1580 algorithm. M is the number of significant elements of U however
1581 there needs to be at least one extra element of B_DIVIDEND
1582 allocated, N is the number of elements of B_DIVISOR. */
1584 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT
*b_quotient
,
1585 unsigned HOST_HALF_WIDE_INT
*b_remainder
,
1586 unsigned HOST_HALF_WIDE_INT
*b_dividend
,
1587 unsigned HOST_HALF_WIDE_INT
*b_divisor
,
1590 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1591 HOST_WIDE_INT and stored in the lower bits of each word. This
1592 algorithm should work properly on both 32 and 64 bit
1594 unsigned HOST_WIDE_INT b
1595 = (unsigned HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
;
1596 unsigned HOST_WIDE_INT qhat
; /* Estimate of quotient digit. */
1597 unsigned HOST_WIDE_INT rhat
; /* A remainder. */
1598 unsigned HOST_WIDE_INT p
; /* Product of two digits. */
1602 /* Single digit divisor. */
1606 for (j
= m
- 1; j
>= 0; j
--)
1608 b_quotient
[j
] = (k
* b
+ b_dividend
[j
])/b_divisor
[0];
1609 k
= ((k
* b
+ b_dividend
[j
])
1610 - ((unsigned HOST_WIDE_INT
)b_quotient
[j
]
1611 * (unsigned HOST_WIDE_INT
)b_divisor
[0]));
1617 s
= clz_hwi (b_divisor
[n
-1]) - HOST_BITS_PER_HALF_WIDE_INT
; /* CHECK clz */
1621 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1622 algorithm, we can overwrite b_dividend and b_divisor, so we do
1624 for (i
= n
- 1; i
> 0; i
--)
1625 b_divisor
[i
] = (b_divisor
[i
] << s
)
1626 | (b_divisor
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1627 b_divisor
[0] = b_divisor
[0] << s
;
1629 b_dividend
[m
] = b_dividend
[m
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
);
1630 for (i
= m
- 1; i
> 0; i
--)
1631 b_dividend
[i
] = (b_dividend
[i
] << s
)
1632 | (b_dividend
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1633 b_dividend
[0] = b_dividend
[0] << s
;
1637 for (j
= m
- n
; j
>= 0; j
--)
1639 qhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) / b_divisor
[n
-1];
1640 rhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) - qhat
* b_divisor
[n
-1];
1642 if (qhat
>= b
|| qhat
* b_divisor
[n
-2] > b
* rhat
+ b_dividend
[j
+n
-2])
1645 rhat
+= b_divisor
[n
-1];
1650 /* Multiply and subtract. */
1652 for (i
= 0; i
< n
; i
++)
1654 p
= qhat
* b_divisor
[i
];
1655 t
= b_dividend
[i
+j
] - k
- (p
& HALF_INT_MASK
);
1656 b_dividend
[i
+ j
] = t
;
1657 k
= ((p
>> HOST_BITS_PER_HALF_WIDE_INT
)
1658 - (t
>> HOST_BITS_PER_HALF_WIDE_INT
));
1660 t
= b_dividend
[j
+n
] - k
;
1661 b_dividend
[j
+n
] = t
;
1663 b_quotient
[j
] = qhat
;
1668 for (i
= 0; i
< n
; i
++)
1670 t
= (HOST_WIDE_INT
)b_dividend
[i
+j
] + b_divisor
[i
] + k
;
1671 b_dividend
[i
+j
] = t
;
1672 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1674 b_dividend
[j
+n
] += k
;
1678 for (i
= 0; i
< n
; i
++)
1679 b_remainder
[i
] = (b_dividend
[i
] >> s
)
1680 | (b_dividend
[i
+1] << (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1682 for (i
= 0; i
< n
; i
++)
1683 b_remainder
[i
] = b_dividend
[i
];
1687 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1688 the result. If QUOTIENT is nonnull, store the value of the quotient
1689 there and return the number of blocks in it. The return value is
1690 not defined otherwise. If REMAINDER is nonnull, store the value
1691 of the remainder there and store the number of blocks in
1692 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1693 the division overflowed. */
1695 wi::divmod_internal (HOST_WIDE_INT
*quotient
, unsigned int *remainder_len
,
1696 HOST_WIDE_INT
*remainder
,
1697 const HOST_WIDE_INT
*dividend_val
,
1698 unsigned int dividend_len
, unsigned int dividend_prec
,
1699 const HOST_WIDE_INT
*divisor_val
, unsigned int divisor_len
,
1700 unsigned int divisor_prec
, signop sgn
,
1703 unsigned int dividend_blocks_needed
= 2 * BLOCKS_NEEDED (dividend_prec
);
1704 unsigned int divisor_blocks_needed
= 2 * BLOCKS_NEEDED (divisor_prec
);
1705 unsigned HOST_HALF_WIDE_INT
1706 b_quotient
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1707 unsigned HOST_HALF_WIDE_INT
1708 b_remainder
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1709 unsigned HOST_HALF_WIDE_INT
1710 b_dividend
[(4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
) + 1];
1711 unsigned HOST_HALF_WIDE_INT
1712 b_divisor
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1714 bool dividend_neg
= false;
1715 bool divisor_neg
= false;
1716 bool overflow
= false;
1717 wide_int neg_dividend
, neg_divisor
;
1719 wide_int_ref dividend
= wi::storage_ref (dividend_val
, dividend_len
,
1721 wide_int_ref divisor
= wi::storage_ref (divisor_val
, divisor_len
,
1726 /* The smallest signed number / -1 causes overflow. The dividend_len
1727 check is for speed rather than correctness. */
1729 && dividend_len
== BLOCKS_NEEDED (dividend_prec
)
1731 && wi::only_sign_bit_p (dividend
))
1734 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1735 (signed min / -1) has the same representation as the orignal dividend.
1736 We have traditionally made division by zero act as division by one,
1737 so there too we use the original dividend. */
1748 for (unsigned int i
= 0; i
< dividend_len
; ++i
)
1749 quotient
[i
] = dividend_val
[i
];
1750 return dividend_len
;
1756 /* Do it on the host if you can. */
1758 && wi::fits_shwi_p (dividend
)
1759 && wi::fits_shwi_p (divisor
))
1761 HOST_WIDE_INT o0
= dividend
.to_shwi ();
1762 HOST_WIDE_INT o1
= divisor
.to_shwi ();
1764 if (o0
== HOST_WIDE_INT_MIN
&& o1
== -1)
1766 gcc_checking_assert (dividend_prec
> HOST_BITS_PER_WIDE_INT
);
1769 quotient
[0] = HOST_WIDE_INT_MIN
;
1782 quotient
[0] = o0
/ o1
;
1785 remainder
[0] = o0
% o1
;
1793 && wi::fits_uhwi_p (dividend
)
1794 && wi::fits_uhwi_p (divisor
))
1796 unsigned HOST_WIDE_INT o0
= dividend
.to_uhwi ();
1797 unsigned HOST_WIDE_INT o1
= divisor
.to_uhwi ();
1800 quotient
[0] = o0
/ o1
;
1803 remainder
[0] = o0
% o1
;
1809 /* Make the divisor and dividend positive and remember what we
1813 if (wi::neg_p (dividend
))
1815 neg_dividend
= -dividend
;
1816 dividend
= neg_dividend
;
1817 dividend_neg
= true;
1819 if (wi::neg_p (divisor
))
1821 neg_divisor
= -divisor
;
1822 divisor
= neg_divisor
;
1827 wi_unpack (b_dividend
, dividend
.get_val (), dividend
.get_len (),
1828 dividend_blocks_needed
, dividend_prec
, sgn
);
1829 wi_unpack (b_divisor
, divisor
.get_val (), divisor
.get_len (),
1830 divisor_blocks_needed
, divisor_prec
, sgn
);
1832 m
= dividend_blocks_needed
;
1834 while (m
> 1 && b_dividend
[m
- 1] == 0)
1837 n
= divisor_blocks_needed
;
1838 while (n
> 1 && b_divisor
[n
- 1] == 0)
1841 memset (b_quotient
, 0, sizeof (b_quotient
));
1843 divmod_internal_2 (b_quotient
, b_remainder
, b_dividend
, b_divisor
, m
, n
);
1845 unsigned int quotient_len
= 0;
1848 wi_pack ((unsigned HOST_WIDE_INT
*) quotient
, b_quotient
, m
);
1849 quotient_len
= canonize (quotient
, (m
+ 1) / 2, dividend_prec
);
1850 /* The quotient is neg if exactly one of the divisor or dividend is
1852 if (dividend_neg
!= divisor_neg
)
1853 quotient_len
= wi::sub_large (quotient
, zeros
, 1, quotient
,
1854 quotient_len
, dividend_prec
,
1860 wi_pack ((unsigned HOST_WIDE_INT
*) remainder
, b_remainder
, n
);
1861 *remainder_len
= canonize (remainder
, (n
+ 1) / 2, dividend_prec
);
1862 /* The remainder is always the same sign as the dividend. */
1864 *remainder_len
= wi::sub_large (remainder
, zeros
, 1, remainder
,
1865 *remainder_len
, dividend_prec
,
1869 return quotient_len
;
1873 * Shifting, rotating and extraction.
1876 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1877 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1879 wi::lshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1880 unsigned int xlen
, unsigned int precision
,
1883 /* Split the shift into a whole-block shift and a subblock shift. */
1884 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1885 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1887 /* The whole-block shift fills with zeros. */
1888 unsigned int len
= BLOCKS_NEEDED (precision
);
1889 for (unsigned int i
= 0; i
< skip
; ++i
)
1892 /* It's easier to handle the simple block case specially. */
1893 if (small_shift
== 0)
1894 for (unsigned int i
= skip
; i
< len
; ++i
)
1895 val
[i
] = safe_uhwi (xval
, xlen
, i
- skip
);
1898 /* The first unfilled output block is a left shift of the first
1899 block in XVAL. The other output blocks contain bits from two
1900 consecutive input blocks. */
1901 unsigned HOST_WIDE_INT carry
= 0;
1902 for (unsigned int i
= skip
; i
< len
; ++i
)
1904 unsigned HOST_WIDE_INT x
= safe_uhwi (xval
, xlen
, i
- skip
);
1905 val
[i
] = (x
<< small_shift
) | carry
;
1906 carry
= x
>> (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1909 return canonize (val
, len
, precision
);
1912 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1913 number of blocks in VAL. The input has XPRECISION bits and the
1914 output has XPRECISION - SHIFT bits. */
1916 rshift_large_common (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1917 unsigned int xlen
, unsigned int xprecision
,
1920 /* Split the shift into a whole-block shift and a subblock shift. */
1921 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1922 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1924 /* Work out how many blocks are needed to store the significant bits
1925 (excluding the upper zeros or signs). */
1926 unsigned int len
= BLOCKS_NEEDED (xprecision
- shift
);
1928 /* It's easier to handle the simple block case specially. */
1929 if (small_shift
== 0)
1930 for (unsigned int i
= 0; i
< len
; ++i
)
1931 val
[i
] = safe_uhwi (xval
, xlen
, i
+ skip
);
1934 /* Each output block but the last is a combination of two input blocks.
1935 The last block is a right shift of the last block in XVAL. */
1936 unsigned HOST_WIDE_INT curr
= safe_uhwi (xval
, xlen
, skip
);
1937 for (unsigned int i
= 0; i
< len
; ++i
)
1939 val
[i
] = curr
>> small_shift
;
1940 curr
= safe_uhwi (xval
, xlen
, i
+ skip
+ 1);
1941 val
[i
] |= curr
<< (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1947 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1948 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1949 VAL has PRECISION bits. */
1951 wi::lrshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1952 unsigned int xlen
, unsigned int xprecision
,
1953 unsigned int precision
, unsigned int shift
)
1955 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1957 /* The value we just created has precision XPRECISION - SHIFT.
1958 Zero-extend it to wider precisions. */
1959 if (precision
> xprecision
- shift
)
1961 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1963 val
[len
- 1] = zext_hwi (val
[len
- 1], small_prec
);
1964 else if (val
[len
- 1] < 0)
1966 /* Add a new block with a zero. */
1971 return canonize (val
, len
, precision
);
1974 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1975 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1976 VAL has PRECISION bits. */
1978 wi::arshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1979 unsigned int xlen
, unsigned int xprecision
,
1980 unsigned int precision
, unsigned int shift
)
1982 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1984 /* The value we just created has precision XPRECISION - SHIFT.
1985 Sign-extend it to wider types. */
1986 if (precision
> xprecision
- shift
)
1988 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1990 val
[len
- 1] = sext_hwi (val
[len
- 1], small_prec
);
1992 return canonize (val
, len
, precision
);
1995 /* Return the number of leading (upper) zeros in X. */
1997 wi::clz (const wide_int_ref
&x
)
1999 /* Calculate how many bits there above the highest represented block. */
2000 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2002 unsigned HOST_WIDE_INT high
= x
.uhigh ();
2004 /* The upper -COUNT bits of HIGH are not part of the value.
2006 high
= (high
<< -count
) >> -count
;
2007 else if (x
.sign_mask () < 0)
2008 /* The upper bit is set, so there are no leading zeros. */
2011 /* We don't need to look below HIGH. Either HIGH is nonzero,
2012 or the top bit of the block below is nonzero; clz_hwi is
2013 HOST_BITS_PER_WIDE_INT in the latter case. */
2014 return count
+ clz_hwi (high
);
2017 /* Return the number of redundant sign bits in X. (That is, the number
2018 of bits immediately below the sign bit that have the same value as
2021 wi::clrsb (const wide_int_ref
&x
)
2023 /* Calculate how many bits there above the highest represented block. */
2024 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2026 unsigned HOST_WIDE_INT high
= x
.uhigh ();
2027 unsigned HOST_WIDE_INT mask
= -1;
2030 /* The upper -COUNT bits of HIGH are not part of the value.
2031 Clear them from both MASK and HIGH. */
2036 /* If the top bit is 1, count the number of leading 1s. If the top
2037 bit is zero, count the number of leading zeros. */
2038 if (high
> mask
/ 2)
2041 /* There are no sign bits below the top block, so we don't need to look
2042 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2044 return count
+ clz_hwi (high
) - 1;
2047 /* Return the number of trailing (lower) zeros in X. */
2049 wi::ctz (const wide_int_ref
&x
)
2051 if (x
.len
== 1 && x
.ulow () == 0)
2054 /* Having dealt with the zero case, there must be a block with a
2055 nonzero bit. We don't care about the bits above the first 1. */
2057 while (x
.val
[i
] == 0)
2059 return i
* HOST_BITS_PER_WIDE_INT
+ ctz_hwi (x
.val
[i
]);
2062 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2065 wi::exact_log2 (const wide_int_ref
&x
)
2067 /* Reject cases where there are implicit -1 blocks above HIGH. */
2068 if (x
.len
* HOST_BITS_PER_WIDE_INT
< x
.precision
&& x
.sign_mask () < 0)
2071 /* Set CRUX to the index of the entry that should be nonzero.
2072 If the top block is zero then the next lowest block (if any)
2073 must have the high bit set. */
2074 unsigned int crux
= x
.len
- 1;
2075 if (crux
> 0 && x
.val
[crux
] == 0)
2078 /* Check that all lower blocks are zero. */
2079 for (unsigned int i
= 0; i
< crux
; ++i
)
2083 /* Get a zero-extended form of block CRUX. */
2084 unsigned HOST_WIDE_INT hwi
= x
.val
[crux
];
2085 if ((crux
+ 1) * HOST_BITS_PER_WIDE_INT
> x
.precision
)
2086 hwi
= zext_hwi (hwi
, x
.precision
% HOST_BITS_PER_WIDE_INT
);
2088 /* Now it's down to whether HWI is a power of 2. */
2089 int res
= ::exact_log2 (hwi
);
2091 res
+= crux
* HOST_BITS_PER_WIDE_INT
;
2095 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2097 wi::floor_log2 (const wide_int_ref
&x
)
2099 return x
.precision
- 1 - clz (x
);
2102 /* Return the index of the first (lowest) set bit in X, counting from 1.
2103 Return 0 if X is 0. */
2105 wi::ffs (const wide_int_ref
&x
)
2107 return eq_p (x
, 0) ? 0 : ctz (x
) + 1;
2110 /* Return true if sign-extending X to have precision PRECISION would give
2111 the minimum signed value at that precision. */
2113 wi::only_sign_bit_p (const wide_int_ref
&x
, unsigned int precision
)
2115 return ctz (x
) + 1 == int (precision
);
2118 /* Return true if X represents the minimum signed value. */
2120 wi::only_sign_bit_p (const wide_int_ref
&x
)
2122 return only_sign_bit_p (x
, x
.precision
);
2126 * Private utilities.
/* GC marking hook required by the GTY machinery.  A widest_int holds
   no garbage-collected pointers, so marking it is a no-op.  */
2129 void gt_ggc_mx (widest_int
*) { }
/* PCH pointer-walking hook (operator form).  widest_int contains no
   pointers that need relocation when writing a precompiled header,
   so there is nothing to do.  */
2130 void gt_pch_nx (widest_int
*, void (*) (void *, void *), void *) { }
/* PCH hook (simple form) -- likewise a no-op for widest_int.  */
2131 void gt_pch_nx (widest_int
*) { }
/* Explicit instantiations of the dump () debugging helpers for each
   wide-int flavor, so the symbols exist for use from a debugger even
   though nothing in the compiler calls them directly.  */
2133 template void wide_int::dump () const;
2134 template void generic_wide_int
<wide_int_ref_storage
<false> >::dump () const;
2135 template void generic_wide_int
<wide_int_ref_storage
<true> >::dump () const;
2136 template void offset_int::dump () const;
2137 template void widest_int::dump () const;