1 /* Operations with very long integers.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
33 #define HOST_BITS_PER_HALF_WIDE_INT 32
34 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
35 # define HOST_HALF_WIDE_INT long
36 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
37 # define HOST_HALF_WIDE_INT int
39 #error Please add support for HOST_HALF_WIDE_INT
42 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
43 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
44 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
45 typedef unsigned HOST_HALF_WIDE_INT UHWtype
;
46 typedef unsigned HOST_WIDE_INT UWtype
;
47 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
48 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
49 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
51 typedef unsigned int UDWtype
__attribute__ ((mode (DI
)));
53 typedef unsigned int UDWtype
__attribute__ ((mode (TI
)));
58 static const HOST_WIDE_INT zeros
[WIDE_INT_MAX_ELTS
] = {};
64 /* Quantities to deal with values that hold half of a wide int. Used
65 in multiply and divide. */
66 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
68 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
69 #define BLOCKS_NEEDED(PREC) \
70 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
71 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
73 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
74 based on the top existing bit of VAL. */
76 static unsigned HOST_WIDE_INT
77 safe_uhwi (const HOST_WIDE_INT
*val
, unsigned int len
, unsigned int i
)
79 return i
< len
? val
[i
] : val
[len
- 1] < 0 ? (HOST_WIDE_INT
) -1 : 0;
82 /* Convert the integer in VAL to canonical form, returning its new length.
83 LEN is the number of blocks currently in VAL and PRECISION is the number
84 of bits in the integer it represents.
86 This function only changes the representation, not the value. */
88 canonize (HOST_WIDE_INT
*val
, unsigned int len
, unsigned int precision
)
90 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
94 if (len
> blocks_needed
)
101 if (len
* HOST_BITS_PER_WIDE_INT
> precision
)
102 val
[len
- 1] = top
= sext_hwi (top
, precision
% HOST_BITS_PER_WIDE_INT
);
103 if (top
!= 0 && top
!= (HOST_WIDE_INT
)-1)
106 /* At this point we know that the top is either 0 or -1. Find the
107 first block that is not a copy of this. */
108 for (i
= len
- 2; i
>= 0; i
--)
110 HOST_WIDE_INT x
= val
[i
];
113 if (SIGN_MASK (x
) == top
)
116 /* We need an extra block because the top bit block i does
117 not match the extension. */
122 /* The number is 0 or -1. */
127 * Conversion routines in and out of wide_int.
130 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
131 result for an integer with precision PRECISION. Return the length
132 of VAL (after any canonization. */
134 wi::from_array (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
135 unsigned int xlen
, unsigned int precision
, bool need_canon
)
137 for (unsigned i
= 0; i
< xlen
; i
++)
139 return need_canon
? canonize (val
, xlen
, precision
) : xlen
;
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
147 wi::from_buffer (const unsigned char *buffer
, unsigned int buffer_len
)
149 unsigned int precision
= buffer_len
* BITS_PER_UNIT
;
150 wide_int result
= wide_int::create (precision
);
151 unsigned int words
= buffer_len
/ UNITS_PER_WORD
;
153 /* We have to clear all the bits ourself, as we merely or in values
155 unsigned int len
= BLOCKS_NEEDED (precision
);
156 HOST_WIDE_INT
*val
= result
.write_val ();
157 for (unsigned int i
= 0; i
< len
; ++i
)
160 for (unsigned int byte
= 0; byte
< buffer_len
; byte
++)
164 unsigned int bitpos
= byte
* BITS_PER_UNIT
;
165 unsigned HOST_WIDE_INT value
;
167 if (buffer_len
> UNITS_PER_WORD
)
169 unsigned int word
= byte
/ UNITS_PER_WORD
;
171 if (WORDS_BIG_ENDIAN
)
172 word
= (words
- 1) - word
;
174 offset
= word
* UNITS_PER_WORD
;
176 if (BYTES_BIG_ENDIAN
)
177 offset
+= (UNITS_PER_WORD
- 1) - (byte
% UNITS_PER_WORD
);
179 offset
+= byte
% UNITS_PER_WORD
;
182 offset
= BYTES_BIG_ENDIAN
? (buffer_len
- 1) - byte
: byte
;
184 value
= (unsigned HOST_WIDE_INT
) buffer
[offset
];
186 index
= bitpos
/ HOST_BITS_PER_WIDE_INT
;
187 val
[index
] |= value
<< (bitpos
% HOST_BITS_PER_WIDE_INT
);
190 result
.set_len (canonize (val
, len
, precision
));
195 /* Sets RESULT from X, the sign is taken according to SGN. */
197 wi::to_mpz (const wide_int_ref
&x
, mpz_t result
, signop sgn
)
199 int len
= x
.get_len ();
200 const HOST_WIDE_INT
*v
= x
.get_val ();
201 int excess
= len
* HOST_BITS_PER_WIDE_INT
- x
.get_precision ();
203 if (wi::neg_p (x
, sgn
))
205 /* We use ones complement to avoid -x80..0 edge case that -
207 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
208 for (int i
= 0; i
< len
; i
++)
211 t
[len
- 1] = (unsigned HOST_WIDE_INT
) t
[len
- 1] << excess
>> excess
;
212 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
213 mpz_com (result
, result
);
217 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
218 for (int i
= 0; i
< len
- 1; i
++)
220 t
[len
- 1] = (unsigned HOST_WIDE_INT
) v
[len
- 1] << excess
>> excess
;
221 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
224 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, v
);
227 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
228 values of VAL will be wrapped; otherwise, they will be set to the
229 appropriate minimum or maximum TYPE bound. */
231 wi::from_mpz (const_tree type
, mpz_t x
, bool wrap
)
234 unsigned int prec
= TYPE_PRECISION (type
);
235 wide_int res
= wide_int::create (prec
);
243 get_type_static_bounds (type
, min
, max
);
245 if (mpz_cmp (x
, min
) < 0)
247 else if (mpz_cmp (x
, max
) > 0)
254 /* Determine the number of unsigned HOST_WIDE_INTs that are required
255 for representing the absolute value. The code to calculate count is
256 extracted from the GMP manual, section "Integer Import and Export":
257 http://gmplib.org/manual/Integer-Import-and-Export.html */
258 numb
= CHAR_BIT
* sizeof (HOST_WIDE_INT
);
259 count
= (mpz_sizeinbase (x
, 2) + numb
- 1) / numb
;
260 HOST_WIDE_INT
*val
= res
.write_val ();
261 /* Read the absolute value.
263 Write directly to the wide_int storage if possible, otherwise leave
264 GMP to allocate the memory for us. It might be slightly more efficient
265 to use mpz_tdiv_r_2exp for the latter case, but the situation is
266 pathological and it seems safer to operate on the original mpz value
268 void *valres
= mpz_export (count
<= WIDE_INT_MAX_ELTS
? val
: 0,
269 &count
, -1, sizeof (HOST_WIDE_INT
), 0, 0, x
);
275 count
= MIN (count
, BLOCKS_NEEDED (prec
));
278 memcpy (val
, valres
, count
* sizeof (HOST_WIDE_INT
));
281 /* Zero-extend the absolute value to PREC bits. */
282 if (count
< BLOCKS_NEEDED (prec
) && val
[count
- 1] < 0)
285 count
= canonize (val
, count
, prec
);
295 * Largest and smallest values in a mode.
298 /* Return the largest SGNed number that is representable in PRECISION bits.
300 TODO: There is still code from the double_int era that trys to
301 make up for the fact that double int's could not represent the
302 min and max values of all types. This code should be removed
303 because the min and max values can always be represented in
304 wide_ints and int-csts. */
306 wi::max_value (unsigned int precision
, signop sgn
)
308 gcc_checking_assert (precision
!= 0);
310 /* The unsigned max is just all ones. */
311 return shwi (-1, precision
);
313 /* The signed max is all ones except the top bit. This must be
314 explicitly represented. */
315 return mask (precision
- 1, false, precision
);
318 /* Return the largest SGNed number that is representable in PRECISION bits. */
320 wi::min_value (unsigned int precision
, signop sgn
)
322 gcc_checking_assert (precision
!= 0);
324 return uhwi (0, precision
);
326 /* The signed min is all zeros except the top bit. This must be
327 explicitly represented. */
328 return wi::set_bit_in_zero (precision
- 1, precision
);
335 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
336 signedness SGN, to an integer that has PRECISION bits. Store the blocks
337 in VAL and return the number of blocks used.
339 This function can handle both extension (PRECISION > XPRECISION)
340 and truncation (PRECISION < XPRECISION). */
342 wi::force_to_size (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
343 unsigned int xlen
, unsigned int xprecision
,
344 unsigned int precision
, signop sgn
)
346 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
347 unsigned int len
= blocks_needed
< xlen
? blocks_needed
: xlen
;
348 for (unsigned i
= 0; i
< len
; i
++)
351 if (precision
> xprecision
)
353 unsigned int small_xprecision
= xprecision
% HOST_BITS_PER_WIDE_INT
;
358 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
359 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
360 else if (val
[len
- 1] < 0)
362 while (len
< BLOCKS_NEEDED (xprecision
))
364 if (small_xprecision
)
365 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
372 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
373 val
[len
- 1] = sext_hwi (val
[len
- 1], small_xprecision
);
376 len
= canonize (val
, len
, precision
);
381 /* This function hides the fact that we cannot rely on the bits beyond
382 the precision. This issue comes up in the relational comparisions
383 where we do allow comparisons of values of different precisions. */
384 static inline HOST_WIDE_INT
385 selt (const HOST_WIDE_INT
*a
, unsigned int len
,
386 unsigned int blocks_needed
, unsigned int small_prec
,
387 unsigned int index
, signop sgn
)
392 else if (index
< blocks_needed
|| sgn
== SIGNED
)
393 /* Signed or within the precision. */
394 val
= SIGN_MASK (a
[len
- 1]);
396 /* Unsigned extension beyond the precision. */
399 if (small_prec
&& index
== blocks_needed
- 1)
400 return (sgn
== SIGNED
401 ? sext_hwi (val
, small_prec
)
402 : zext_hwi (val
, small_prec
));
407 /* Find the highest bit represented in a wide int. This will in
408 general have the same value as the sign bit. */
409 static inline HOST_WIDE_INT
410 top_bit_of (const HOST_WIDE_INT
*a
, unsigned int len
, unsigned int prec
)
412 int excess
= len
* HOST_BITS_PER_WIDE_INT
- prec
;
413 unsigned HOST_WIDE_INT val
= a
[len
- 1];
416 return val
>> (HOST_BITS_PER_WIDE_INT
- 1);
420 * Comparisons, note that only equality is an operator. The other
421 * comparisons cannot be operators since they are inherently signed or
422 * unsigned and C++ has no such operators.
425 /* Return true if OP0 == OP1. */
427 wi::eq_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
428 const HOST_WIDE_INT
*op1
, unsigned int op1len
,
432 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
434 if (op0len
!= op1len
)
437 if (op0len
== BLOCKS_NEEDED (prec
) && small_prec
)
439 /* It does not matter if we zext or sext here, we just have to
440 do both the same way. */
441 if (zext_hwi (op0
[l0
], small_prec
) != zext_hwi (op1
[l0
], small_prec
))
447 if (op0
[l0
] != op1
[l0
])
455 /* Return true if OP0 < OP1 using signed comparisons. */
457 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
458 unsigned int precision
,
459 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
461 HOST_WIDE_INT s0
, s1
;
462 unsigned HOST_WIDE_INT u0
, u1
;
463 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
464 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
465 int l
= MAX (op0len
- 1, op1len
- 1);
467 /* Only the top block is compared as signed. The rest are unsigned
469 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
470 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
479 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
480 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
492 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
495 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
496 unsigned int precision
,
497 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
499 HOST_WIDE_INT s0
, s1
;
500 unsigned HOST_WIDE_INT u0
, u1
;
501 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
502 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
503 int l
= MAX (op0len
- 1, op1len
- 1);
505 /* Only the top block is compared as signed. The rest are unsigned
507 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
508 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
517 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
518 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
530 /* Return true if OP0 < OP1 using unsigned comparisons. */
532 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
533 unsigned int precision
,
534 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
536 unsigned HOST_WIDE_INT x0
;
537 unsigned HOST_WIDE_INT x1
;
538 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
539 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
540 int l
= MAX (op0len
- 1, op1len
- 1);
544 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
545 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
556 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
557 unsigned compares. */
559 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
560 unsigned int precision
,
561 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
563 unsigned HOST_WIDE_INT x0
;
564 unsigned HOST_WIDE_INT x1
;
565 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
566 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
567 int l
= MAX (op0len
- 1, op1len
- 1);
571 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
572 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
587 /* Sign-extend the number represented by XVAL and XLEN into VAL,
588 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
589 and VAL have PRECISION bits. */
591 wi::sext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
592 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
594 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
595 /* Extending beyond the precision is a no-op. If we have only stored
596 OFFSET bits or fewer, the rest are already signs. */
597 if (offset
>= precision
|| len
>= xlen
)
599 for (unsigned i
= 0; i
< xlen
; ++i
)
603 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
604 for (unsigned int i
= 0; i
< len
; i
++)
608 val
[len
] = sext_hwi (xval
[len
], suboffset
);
611 return canonize (val
, len
, precision
);
614 /* Zero-extend the number represented by XVAL and XLEN into VAL,
615 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
616 and VAL have PRECISION bits. */
618 wi::zext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
619 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
621 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
622 /* Extending beyond the precision is a no-op. If we have only stored
623 OFFSET bits or fewer, and the upper stored bit is zero, then there
625 if (offset
>= precision
|| (len
>= xlen
&& xval
[xlen
- 1] >= 0))
627 for (unsigned i
= 0; i
< xlen
; ++i
)
631 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
632 for (unsigned int i
= 0; i
< len
; i
++)
633 val
[i
] = i
< xlen
? xval
[i
] : -1;
635 val
[len
] = zext_hwi (len
< xlen
? xval
[len
] : -1, suboffset
);
638 return canonize (val
, len
+ 1, precision
);
642 * Masking, inserting, shifting, rotating.
645 /* Insert WIDTH bits from Y into X starting at START. */
647 wi::insert (const wide_int
&x
, const wide_int
&y
, unsigned int start
,
654 unsigned int precision
= x
.get_precision ();
655 if (start
>= precision
)
658 gcc_checking_assert (precision
>= width
);
660 if (start
+ width
>= precision
)
661 width
= precision
- start
;
663 mask
= wi::shifted_mask (start
, width
, false, precision
);
664 tmp
= wi::lshift (wide_int::from (y
, precision
, UNSIGNED
), start
);
667 tmp
= wi::bit_and_not (x
, mask
);
668 result
= result
| tmp
;
673 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
674 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
677 wi::set_bit_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
678 unsigned int xlen
, unsigned int precision
, unsigned int bit
)
680 unsigned int block
= bit
/ HOST_BITS_PER_WIDE_INT
;
681 unsigned int subbit
= bit
% HOST_BITS_PER_WIDE_INT
;
683 if (block
+ 1 >= xlen
)
685 /* The operation either affects the last current block or needs
687 unsigned int len
= block
+ 1;
688 for (unsigned int i
= 0; i
< len
; i
++)
689 val
[i
] = safe_uhwi (xval
, xlen
, i
);
690 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
692 /* If the bit we just set is at the msb of the block, make sure
693 that any higher bits are zeros. */
694 if (bit
+ 1 < precision
&& subbit
== HOST_BITS_PER_WIDE_INT
- 1)
700 for (unsigned int i
= 0; i
< xlen
; i
++)
702 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
703 return canonize (val
, xlen
, precision
);
709 wide_int_storage::bswap () const
711 wide_int result
= wide_int::create (precision
);
713 unsigned int len
= BLOCKS_NEEDED (precision
);
714 unsigned int xlen
= get_len ();
715 const HOST_WIDE_INT
*xval
= get_val ();
716 HOST_WIDE_INT
*val
= result
.write_val ();
718 /* This is not a well defined operation if the precision is not a
720 gcc_assert ((precision
& 0x7) == 0);
722 for (i
= 0; i
< len
; i
++)
725 /* Only swap the bytes that are not the padding. */
726 for (s
= 0; s
< precision
; s
+= 8)
728 unsigned int d
= precision
- s
- 8;
729 unsigned HOST_WIDE_INT byte
;
731 unsigned int block
= s
/ HOST_BITS_PER_WIDE_INT
;
732 unsigned int offset
= s
& (HOST_BITS_PER_WIDE_INT
- 1);
734 byte
= (safe_uhwi (xval
, xlen
, block
) >> offset
) & 0xff;
736 block
= d
/ HOST_BITS_PER_WIDE_INT
;
737 offset
= d
& (HOST_BITS_PER_WIDE_INT
- 1);
739 val
[block
] |= byte
<< offset
;
742 result
.set_len (canonize (val
, len
, precision
));
746 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
747 above that up to PREC are zeros. The result is inverted if NEGATE
748 is true. Return the number of blocks in VAL. */
750 wi::mask (HOST_WIDE_INT
*val
, unsigned int width
, bool negate
,
755 val
[0] = negate
? 0 : -1;
760 val
[0] = negate
? -1 : 0;
765 while (i
< width
/ HOST_BITS_PER_WIDE_INT
)
766 val
[i
++] = negate
? 0 : -1;
768 unsigned int shift
= width
& (HOST_BITS_PER_WIDE_INT
- 1);
771 HOST_WIDE_INT last
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
772 val
[i
++] = negate
? ~last
: last
;
775 val
[i
++] = negate
? -1 : 0;
780 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
781 bits are ones, and the bits above that up to PREC are zeros. The result
782 is inverted if NEGATE is true. Return the number of blocks in VAL. */
784 wi::shifted_mask (HOST_WIDE_INT
*val
, unsigned int start
, unsigned int width
,
785 bool negate
, unsigned int prec
)
787 if (start
>= prec
|| width
== 0)
789 val
[0] = negate
? -1 : 0;
793 if (width
> prec
- start
)
794 width
= prec
- start
;
795 unsigned int end
= start
+ width
;
798 while (i
< start
/ HOST_BITS_PER_WIDE_INT
)
799 val
[i
++] = negate
? -1 : 0;
801 unsigned int shift
= start
& (HOST_BITS_PER_WIDE_INT
- 1);
804 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
806 if (shift
< HOST_BITS_PER_WIDE_INT
)
809 block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - block
- 1;
810 val
[i
++] = negate
? ~block
: block
;
815 val
[i
++] = negate
? block
: ~block
;
818 while (i
< end
/ HOST_BITS_PER_WIDE_INT
)
820 val
[i
++] = negate
? 0 : -1;
822 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
826 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
827 val
[i
++] = negate
? ~block
: block
;
830 val
[i
++] = negate
? -1 : 0;
836 * logical operations.
839 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
841 wi::and_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
842 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
843 unsigned int op1len
, unsigned int prec
)
847 bool need_canon
= true;
849 unsigned int len
= MAX (op0len
, op1len
);
852 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
870 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
886 val
[l0
] = op0
[l0
] & op1
[l0
];
891 len
= canonize (val
, len
, prec
);
896 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
898 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
899 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
900 unsigned int op1len
, unsigned int prec
)
905 bool need_canon
= true;
907 unsigned int len
= MAX (op0len
, op1len
);
910 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
928 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
944 val
[l0
] = op0
[l0
] & ~op1
[l0
];
949 len
= canonize (val
, len
, prec
);
954 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
956 wi::or_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
957 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
958 unsigned int op1len
, unsigned int prec
)
963 bool need_canon
= true;
965 unsigned int len
= MAX (op0len
, op1len
);
968 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
986 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1002 val
[l0
] = op0
[l0
] | op1
[l0
];
1007 len
= canonize (val
, len
, prec
);
1012 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1014 wi::or_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1015 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1016 unsigned int op1len
, unsigned int prec
)
1019 int l0
= op0len
- 1;
1020 int l1
= op1len
- 1;
1021 bool need_canon
= true;
1023 unsigned int len
= MAX (op0len
, op1len
);
1026 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1044 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1060 val
[l0
] = op0
[l0
] | ~op1
[l0
];
1065 len
= canonize (val
, len
, prec
);
1070 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1072 wi::xor_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1073 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1074 unsigned int op1len
, unsigned int prec
)
1077 int l0
= op0len
- 1;
1078 int l1
= op1len
- 1;
1080 unsigned int len
= MAX (op0len
, op1len
);
1083 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1086 val
[l0
] = op0
[l0
] ^ op1mask
;
1093 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1096 val
[l1
] = op0mask
^ op1
[l1
];
1103 val
[l0
] = op0
[l0
] ^ op1
[l0
];
1107 return canonize (val
, len
, prec
);
1114 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1115 whether the result overflows when OP0 and OP1 are treated as having
1116 signedness SGN. Return the number of blocks in VAL. */
1118 wi::add_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1119 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1120 unsigned int op1len
, unsigned int prec
,
1121 signop sgn
, bool *overflow
)
1123 unsigned HOST_WIDE_INT o0
= 0;
1124 unsigned HOST_WIDE_INT o1
= 0;
1125 unsigned HOST_WIDE_INT x
= 0;
1126 unsigned HOST_WIDE_INT carry
= 0;
1127 unsigned HOST_WIDE_INT old_carry
= 0;
1128 unsigned HOST_WIDE_INT mask0
, mask1
;
1131 unsigned int len
= MAX (op0len
, op1len
);
1132 mask0
= -top_bit_of (op0
, op0len
, prec
);
1133 mask1
= -top_bit_of (op1
, op1len
, prec
);
1134 /* Add all of the explicitly defined elements. */
1136 for (i
= 0; i
< len
; i
++)
1138 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
) op0
[i
] : mask0
;
1139 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
) op1
[i
] : mask1
;
1140 x
= o0
+ o1
+ carry
;
1143 carry
= carry
== 0 ? x
< o0
: x
<= o0
;
1146 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1148 val
[len
] = mask0
+ mask1
+ carry
;
1155 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1158 unsigned HOST_WIDE_INT x
= (val
[len
- 1] ^ o0
) & (val
[len
- 1] ^ o1
);
1159 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1163 /* Put the MSB of X and O0 and in the top of the HWI. */
1167 *overflow
= (x
<= o0
);
1169 *overflow
= (x
< o0
);
1173 return canonize (val
, len
, prec
);
1176 /* Subroutines of the multiplication and division operations. Unpack
1177 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1178 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1179 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1181 wi_unpack (unsigned HOST_HALF_WIDE_INT
*result
, const HOST_WIDE_INT
*input
,
1182 unsigned int in_len
, unsigned int out_len
,
1183 unsigned int prec
, signop sgn
)
1187 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
1188 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1193 mask
= -top_bit_of ((const HOST_WIDE_INT
*) input
, in_len
, prec
);
1194 mask
&= HALF_INT_MASK
;
1199 for (i
= 0; i
< blocks_needed
- 1; i
++)
1201 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1203 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1206 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1210 x
= sext_hwi (x
, small_prec
);
1212 x
= zext_hwi (x
, small_prec
);
1215 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1217 /* Smear the sign bit. */
1222 /* The inverse of wi_unpack. IN_LEN is the the number of input
1223 blocks. The number of output blocks will be half this amount. */
1225 wi_pack (unsigned HOST_WIDE_INT
*result
,
1226 const unsigned HOST_HALF_WIDE_INT
*input
,
1227 unsigned int in_len
)
1232 while (i
+ 2 < in_len
)
1234 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1235 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1]
1236 << HOST_BITS_PER_HALF_WIDE_INT
);
1240 /* Handle the case where in_len is odd. For this we zero extend. */
1242 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
];
1244 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1245 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1] << HOST_BITS_PER_HALF_WIDE_INT
);
1248 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1251 If HIGH is not set, throw away the upper half after the check is
1252 made to see if it overflows. Unfortunately there is no better way
1253 to check for overflow than to do this. If OVERFLOW is nonnull,
1254 record in *OVERFLOW whether the result overflowed. SGN controls
1255 the signedness and is used to check overflow or if HIGH is set. */
1257 wi::mul_internal (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op1val
,
1258 unsigned int op1len
, const HOST_WIDE_INT
*op2val
,
1259 unsigned int op2len
, unsigned int prec
, signop sgn
,
1260 bool *overflow
, bool high
)
1262 unsigned HOST_WIDE_INT o0
, o1
, k
, t
;
1265 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1266 unsigned int half_blocks_needed
= blocks_needed
* 2;
1267 /* The sizes here are scaled to support a 2x largest mode by 2x
1268 largest mode yielding a 4x largest mode result. This is what is
1271 unsigned HOST_HALF_WIDE_INT
1272 u
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1273 unsigned HOST_HALF_WIDE_INT
1274 v
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1275 /* The '2' in 'R' is because we are internally doing a full
1277 unsigned HOST_HALF_WIDE_INT
1278 r
[2 * 4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1279 HOST_WIDE_INT mask
= ((HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
) - 1;
1281 /* If the top level routine did not really pass in an overflow, then
1282 just make sure that we never attempt to set it. */
1283 bool needs_overflow
= (overflow
!= 0);
1287 wide_int_ref op1
= wi::storage_ref (op1val
, op1len
, prec
);
1288 wide_int_ref op2
= wi::storage_ref (op2val
, op2len
, prec
);
1290 /* This is a surprisingly common case, so do it first. */
1291 if (op1
== 0 || op2
== 0)
1298 if (sgn
== UNSIGNED
)
1300 /* If the inputs are single HWIs and the output has room for at
1301 least two HWIs, we can use umul_ppmm directly. */
1302 if (prec
>= HOST_BITS_PER_WIDE_INT
* 2
1303 && wi::fits_uhwi_p (op1
)
1304 && wi::fits_uhwi_p (op2
))
1306 /* This case never overflows. */
1312 umul_ppmm (val
[1], val
[0], op1
.ulow (), op2
.ulow ());
1313 if (val
[1] < 0 && prec
> HOST_BITS_PER_WIDE_INT
* 2)
1318 return 1 + (val
[1] != 0 || val
[0] < 0);
1320 /* Likewise if the output is a full single HWI, except that the
1321 upper HWI of the result is only used for determining overflow.
1322 (We handle this case inline when overflow isn't needed.) */
1323 else if (prec
== HOST_BITS_PER_WIDE_INT
)
1325 unsigned HOST_WIDE_INT upper
;
1326 umul_ppmm (upper
, val
[0], op1
.ulow (), op2
.ulow ());
1328 *overflow
= (upper
!= 0);
1336 /* Handle multiplications by 1. */
1341 val
[0] = wi::neg_p (op2
, sgn
) ? -1 : 0;
1344 for (i
= 0; i
< op2len
; i
++)
1352 val
[0] = wi::neg_p (op1
, sgn
) ? -1 : 0;
1355 for (i
= 0; i
< op1len
; i
++)
1360 /* If we need to check for overflow, we can only do half wide
1361 multiplies quickly because we need to look at the top bits to
1362 check for the overflow. */
1363 if ((high
|| needs_overflow
)
1364 && (prec
<= HOST_BITS_PER_HALF_WIDE_INT
))
1366 unsigned HOST_WIDE_INT r
;
1370 o0
= op1
.to_shwi ();
1371 o1
= op2
.to_shwi ();
1375 o0
= op1
.to_uhwi ();
1376 o1
= op2
.to_uhwi ();
1384 if ((HOST_WIDE_INT
) r
!= sext_hwi (r
, prec
))
1389 if ((r
>> prec
) != 0)
1393 val
[0] = high
? r
>> prec
: r
;
1397 /* We do unsigned mul and then correct it. */
1398 wi_unpack (u
, op1val
, op1len
, half_blocks_needed
, prec
, SIGNED
);
1399 wi_unpack (v
, op2val
, op2len
, half_blocks_needed
, prec
, SIGNED
);
1401 /* The 2 is for a full mult. */
1402 memset (r
, 0, half_blocks_needed
* 2
1403 * HOST_BITS_PER_HALF_WIDE_INT
/ CHAR_BIT
);
1405 for (j
= 0; j
< half_blocks_needed
; j
++)
1408 for (i
= 0; i
< half_blocks_needed
; i
++)
1410 t
= ((unsigned HOST_WIDE_INT
)u
[i
] * (unsigned HOST_WIDE_INT
)v
[j
]
1412 r
[i
+ j
] = t
& HALF_INT_MASK
;
1413 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1415 r
[j
+ half_blocks_needed
] = k
;
1418 /* We did unsigned math above. For signed we must adjust the
1419 product (assuming we need to see that). */
1420 if (sgn
== SIGNED
&& (high
|| needs_overflow
))
1422 unsigned HOST_WIDE_INT b
;
1423 if (wi::neg_p (op1
))
1426 for (i
= 0; i
< half_blocks_needed
; i
++)
1428 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1429 - (unsigned HOST_WIDE_INT
)v
[i
] - b
;
1430 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1431 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1434 if (wi::neg_p (op2
))
1437 for (i
= 0; i
< half_blocks_needed
; i
++)
1439 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1440 - (unsigned HOST_WIDE_INT
)u
[i
] - b
;
1441 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1442 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1451 /* For unsigned, overflow is true if any of the top bits are set.
1452 For signed, overflow is true if any of the top bits are not equal
1454 if (sgn
== UNSIGNED
)
1458 top
= r
[(half_blocks_needed
) - 1];
1459 top
= SIGN_MASK (top
<< (HOST_BITS_PER_WIDE_INT
/ 2));
1463 for (i
= half_blocks_needed
; i
< half_blocks_needed
* 2; i
++)
1464 if (((HOST_WIDE_INT
)(r
[i
] & mask
)) != top
)
1470 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1471 wi_pack ((unsigned HOST_WIDE_INT
*) val
,
1472 &r
[half_blocks_needed
], half_blocks_needed
);
1473 return canonize (val
, blocks_needed
, prec
);
1477 /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
1478 wi_pack ((unsigned HOST_WIDE_INT
*) val
, r
, half_blocks_needed
);
1479 return canonize (val
, blocks_needed
, prec
);
1483 /* Compute the population count of X. */
1485 wi::popcount (const wide_int_ref
&x
)
1490 /* The high order block is special if it is the last block and the
1491 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1492 have to clear out any ones above the precision before doing
1493 popcount on this block. */
1494 count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1495 unsigned int stop
= x
.len
;
1498 count
= popcount_hwi (x
.uhigh () << -count
);
1503 if (x
.sign_mask () >= 0)
1507 for (i
= 0; i
< stop
; ++i
)
1508 count
+= popcount_hwi (x
.val
[i
]);
1513 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1514 whether the result overflows when OP0 and OP1 are treated as having
1515 signedness SGN. Return the number of blocks in VAL. */
1517 wi::sub_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1518 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1519 unsigned int op1len
, unsigned int prec
,
1520 signop sgn
, bool *overflow
)
1522 unsigned HOST_WIDE_INT o0
= 0;
1523 unsigned HOST_WIDE_INT o1
= 0;
1524 unsigned HOST_WIDE_INT x
= 0;
1525 /* We implement subtraction as an in place negate and add. Negation
1526 is just inversion and add 1, so we can do the add of 1 by just
1527 starting the borrow in of the first element at 1. */
1528 unsigned HOST_WIDE_INT borrow
= 0;
1529 unsigned HOST_WIDE_INT old_borrow
= 0;
1531 unsigned HOST_WIDE_INT mask0
, mask1
;
1534 unsigned int len
= MAX (op0len
, op1len
);
1535 mask0
= -top_bit_of (op0
, op0len
, prec
);
1536 mask1
= -top_bit_of (op1
, op1len
, prec
);
1538 /* Subtract all of the explicitly defined elements. */
1539 for (i
= 0; i
< len
; i
++)
1541 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
)op0
[i
] : mask0
;
1542 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
)op1
[i
] : mask1
;
1543 x
= o0
- o1
- borrow
;
1545 old_borrow
= borrow
;
1546 borrow
= borrow
== 0 ? o0
< o1
: o0
<= o1
;
1549 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1551 val
[len
] = mask0
- mask1
- borrow
;
1558 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1561 unsigned HOST_WIDE_INT x
= (o0
^ o1
) & (val
[len
- 1] ^ o0
);
1562 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1566 /* Put the MSB of X and O0 and in the top of the HWI. */
1570 *overflow
= (x
>= o0
);
1572 *overflow
= (x
> o0
);
1576 return canonize (val
, len
, prec
);
1584 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1585 algorithm is a small modification of the algorithm in Hacker's
1586 Delight by Warren, which itself is a small modification of Knuth's
1587 algorithm. M is the number of significant elements of U however
1588 there needs to be at least one extra element of B_DIVIDEND
1589 allocated, N is the number of elements of B_DIVISOR. */
1591 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT
*b_quotient
,
1592 unsigned HOST_HALF_WIDE_INT
*b_remainder
,
1593 unsigned HOST_HALF_WIDE_INT
*b_dividend
,
1594 unsigned HOST_HALF_WIDE_INT
*b_divisor
,
1597 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1598 HOST_WIDE_INT and stored in the lower bits of each word. This
1599 algorithm should work properly on both 32 and 64 bit
1601 unsigned HOST_WIDE_INT b
1602 = (unsigned HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
;
1603 unsigned HOST_WIDE_INT qhat
; /* Estimate of quotient digit. */
1604 unsigned HOST_WIDE_INT rhat
; /* A remainder. */
1605 unsigned HOST_WIDE_INT p
; /* Product of two digits. */
1609 /* Single digit divisor. */
1613 for (j
= m
- 1; j
>= 0; j
--)
1615 b_quotient
[j
] = (k
* b
+ b_dividend
[j
])/b_divisor
[0];
1616 k
= ((k
* b
+ b_dividend
[j
])
1617 - ((unsigned HOST_WIDE_INT
)b_quotient
[j
]
1618 * (unsigned HOST_WIDE_INT
)b_divisor
[0]));
1624 s
= clz_hwi (b_divisor
[n
-1]) - HOST_BITS_PER_HALF_WIDE_INT
; /* CHECK clz */
1628 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1629 algorithm, we can overwrite b_dividend and b_divisor, so we do
1631 for (i
= n
- 1; i
> 0; i
--)
1632 b_divisor
[i
] = (b_divisor
[i
] << s
)
1633 | (b_divisor
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1634 b_divisor
[0] = b_divisor
[0] << s
;
1636 b_dividend
[m
] = b_dividend
[m
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
);
1637 for (i
= m
- 1; i
> 0; i
--)
1638 b_dividend
[i
] = (b_dividend
[i
] << s
)
1639 | (b_dividend
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1640 b_dividend
[0] = b_dividend
[0] << s
;
1644 for (j
= m
- n
; j
>= 0; j
--)
1646 qhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) / b_divisor
[n
-1];
1647 rhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) - qhat
* b_divisor
[n
-1];
1649 if (qhat
>= b
|| qhat
* b_divisor
[n
-2] > b
* rhat
+ b_dividend
[j
+n
-2])
1652 rhat
+= b_divisor
[n
-1];
1657 /* Multiply and subtract. */
1659 for (i
= 0; i
< n
; i
++)
1661 p
= qhat
* b_divisor
[i
];
1662 t
= b_dividend
[i
+j
] - k
- (p
& HALF_INT_MASK
);
1663 b_dividend
[i
+ j
] = t
;
1664 k
= ((p
>> HOST_BITS_PER_HALF_WIDE_INT
)
1665 - (t
>> HOST_BITS_PER_HALF_WIDE_INT
));
1667 t
= b_dividend
[j
+n
] - k
;
1668 b_dividend
[j
+n
] = t
;
1670 b_quotient
[j
] = qhat
;
1675 for (i
= 0; i
< n
; i
++)
1677 t
= (HOST_WIDE_INT
)b_dividend
[i
+j
] + b_divisor
[i
] + k
;
1678 b_dividend
[i
+j
] = t
;
1679 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1681 b_dividend
[j
+n
] += k
;
1685 for (i
= 0; i
< n
; i
++)
1686 b_remainder
[i
] = (b_dividend
[i
] >> s
)
1687 | (b_dividend
[i
+1] << (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1689 for (i
= 0; i
< n
; i
++)
1690 b_remainder
[i
] = b_dividend
[i
];
1694 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1695 the result. If QUOTIENT is nonnull, store the value of the quotient
1696 there and return the number of blocks in it. The return value is
1697 not defined otherwise. If REMAINDER is nonnull, store the value
1698 of the remainder there and store the number of blocks in
1699 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1700 the division overflowed. */
1702 wi::divmod_internal (HOST_WIDE_INT
*quotient
, unsigned int *remainder_len
,
1703 HOST_WIDE_INT
*remainder
,
1704 const HOST_WIDE_INT
*dividend_val
,
1705 unsigned int dividend_len
, unsigned int dividend_prec
,
1706 const HOST_WIDE_INT
*divisor_val
, unsigned int divisor_len
,
1707 unsigned int divisor_prec
, signop sgn
,
1710 unsigned int dividend_blocks_needed
= 2 * BLOCKS_NEEDED (dividend_prec
);
1711 unsigned int divisor_blocks_needed
= 2 * BLOCKS_NEEDED (divisor_prec
);
1712 unsigned HOST_HALF_WIDE_INT
1713 b_quotient
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1714 unsigned HOST_HALF_WIDE_INT
1715 b_remainder
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1716 unsigned HOST_HALF_WIDE_INT
1717 b_dividend
[(4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
) + 1];
1718 unsigned HOST_HALF_WIDE_INT
1719 b_divisor
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1721 bool dividend_neg
= false;
1722 bool divisor_neg
= false;
1723 bool overflow
= false;
1724 wide_int neg_dividend
, neg_divisor
;
1726 wide_int_ref dividend
= wi::storage_ref (dividend_val
, dividend_len
,
1728 wide_int_ref divisor
= wi::storage_ref (divisor_val
, divisor_len
,
1733 /* The smallest signed number / -1 causes overflow. The dividend_len
1734 check is for speed rather than correctness. */
1736 && dividend_len
== BLOCKS_NEEDED (dividend_prec
)
1738 && wi::only_sign_bit_p (dividend
))
1741 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1742 (signed min / -1) has the same representation as the orignal dividend.
1743 We have traditionally made division by zero act as division by one,
1744 so there too we use the original dividend. */
1755 for (unsigned int i
= 0; i
< dividend_len
; ++i
)
1756 quotient
[i
] = dividend_val
[i
];
1757 return dividend_len
;
1763 /* Do it on the host if you can. */
1765 && wi::fits_shwi_p (dividend
)
1766 && wi::fits_shwi_p (divisor
))
1768 HOST_WIDE_INT o0
= dividend
.to_shwi ();
1769 HOST_WIDE_INT o1
= divisor
.to_shwi ();
1771 if (o0
== HOST_WIDE_INT_MIN
&& o1
== -1)
1773 gcc_checking_assert (dividend_prec
> HOST_BITS_PER_WIDE_INT
);
1776 quotient
[0] = HOST_WIDE_INT_MIN
;
1789 quotient
[0] = o0
/ o1
;
1792 remainder
[0] = o0
% o1
;
1800 && wi::fits_uhwi_p (dividend
)
1801 && wi::fits_uhwi_p (divisor
))
1803 unsigned HOST_WIDE_INT o0
= dividend
.to_uhwi ();
1804 unsigned HOST_WIDE_INT o1
= divisor
.to_uhwi ();
1807 quotient
[0] = o0
/ o1
;
1810 remainder
[0] = o0
% o1
;
1816 /* Make the divisor and dividend positive and remember what we
1820 if (wi::neg_p (dividend
))
1822 neg_dividend
= -dividend
;
1823 dividend
= neg_dividend
;
1824 dividend_neg
= true;
1826 if (wi::neg_p (divisor
))
1828 neg_divisor
= -divisor
;
1829 divisor
= neg_divisor
;
1834 wi_unpack (b_dividend
, dividend
.get_val (), dividend
.get_len (),
1835 dividend_blocks_needed
, dividend_prec
, sgn
);
1836 wi_unpack (b_divisor
, divisor
.get_val (), divisor
.get_len (),
1837 divisor_blocks_needed
, divisor_prec
, sgn
);
1839 m
= dividend_blocks_needed
;
1841 while (m
> 1 && b_dividend
[m
- 1] == 0)
1844 n
= divisor_blocks_needed
;
1845 while (n
> 1 && b_divisor
[n
- 1] == 0)
1848 memset (b_quotient
, 0, sizeof (b_quotient
));
1850 divmod_internal_2 (b_quotient
, b_remainder
, b_dividend
, b_divisor
, m
, n
);
1852 unsigned int quotient_len
= 0;
1855 wi_pack ((unsigned HOST_WIDE_INT
*) quotient
, b_quotient
, m
);
1856 quotient_len
= canonize (quotient
, (m
+ 1) / 2, dividend_prec
);
1857 /* The quotient is neg if exactly one of the divisor or dividend is
1859 if (dividend_neg
!= divisor_neg
)
1860 quotient_len
= wi::sub_large (quotient
, zeros
, 1, quotient
,
1861 quotient_len
, dividend_prec
,
1867 wi_pack ((unsigned HOST_WIDE_INT
*) remainder
, b_remainder
, n
);
1868 *remainder_len
= canonize (remainder
, (n
+ 1) / 2, dividend_prec
);
1869 /* The remainder is always the same sign as the dividend. */
1871 *remainder_len
= wi::sub_large (remainder
, zeros
, 1, remainder
,
1872 *remainder_len
, dividend_prec
,
1876 return quotient_len
;
1880 * Shifting, rotating and extraction.
1883 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1884 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1886 wi::lshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1887 unsigned int xlen
, unsigned int precision
,
1890 /* Split the shift into a whole-block shift and a subblock shift. */
1891 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1892 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1894 /* The whole-block shift fills with zeros. */
1895 unsigned int len
= BLOCKS_NEEDED (precision
);
1896 for (unsigned int i
= 0; i
< skip
; ++i
)
1899 /* It's easier to handle the simple block case specially. */
1900 if (small_shift
== 0)
1901 for (unsigned int i
= skip
; i
< len
; ++i
)
1902 val
[i
] = safe_uhwi (xval
, xlen
, i
- skip
);
1905 /* The first unfilled output block is a left shift of the first
1906 block in XVAL. The other output blocks contain bits from two
1907 consecutive input blocks. */
1908 unsigned HOST_WIDE_INT carry
= 0;
1909 for (unsigned int i
= skip
; i
< len
; ++i
)
1911 unsigned HOST_WIDE_INT x
= safe_uhwi (xval
, xlen
, i
- skip
);
1912 val
[i
] = (x
<< small_shift
) | carry
;
1913 carry
= x
>> (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1916 return canonize (val
, len
, precision
);
1919 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1920 number of blocks in VAL. The input has XPRECISION bits and the
1921 output has XPRECISION - SHIFT bits. */
1923 rshift_large_common (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1924 unsigned int xlen
, unsigned int xprecision
,
1927 /* Split the shift into a whole-block shift and a subblock shift. */
1928 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1929 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1931 /* Work out how many blocks are needed to store the significant bits
1932 (excluding the upper zeros or signs). */
1933 unsigned int len
= BLOCKS_NEEDED (xprecision
- shift
);
1935 /* It's easier to handle the simple block case specially. */
1936 if (small_shift
== 0)
1937 for (unsigned int i
= 0; i
< len
; ++i
)
1938 val
[i
] = safe_uhwi (xval
, xlen
, i
+ skip
);
1941 /* Each output block but the last is a combination of two input blocks.
1942 The last block is a right shift of the last block in XVAL. */
1943 unsigned HOST_WIDE_INT curr
= safe_uhwi (xval
, xlen
, skip
);
1944 for (unsigned int i
= 0; i
< len
; ++i
)
1946 val
[i
] = curr
>> small_shift
;
1947 curr
= safe_uhwi (xval
, xlen
, i
+ skip
+ 1);
1948 val
[i
] |= curr
<< (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1954 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1955 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1956 VAL has PRECISION bits. */
1958 wi::lrshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1959 unsigned int xlen
, unsigned int xprecision
,
1960 unsigned int precision
, unsigned int shift
)
1962 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1964 /* The value we just created has precision XPRECISION - SHIFT.
1965 Zero-extend it to wider precisions. */
1966 if (precision
> xprecision
- shift
)
1968 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1970 val
[len
- 1] = zext_hwi (val
[len
- 1], small_prec
);
1971 else if (val
[len
- 1] < 0)
1973 /* Add a new block with a zero. */
1978 return canonize (val
, len
, precision
);
1981 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1982 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1983 VAL has PRECISION bits. */
1985 wi::arshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1986 unsigned int xlen
, unsigned int xprecision
,
1987 unsigned int precision
, unsigned int shift
)
1989 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1991 /* The value we just created has precision XPRECISION - SHIFT.
1992 Sign-extend it to wider types. */
1993 if (precision
> xprecision
- shift
)
1995 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1997 val
[len
- 1] = sext_hwi (val
[len
- 1], small_prec
);
1999 return canonize (val
, len
, precision
);
2002 /* Return the number of leading (upper) zeros in X. */
2004 wi::clz (const wide_int_ref
&x
)
2006 /* Calculate how many bits there above the highest represented block. */
2007 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2009 unsigned HOST_WIDE_INT high
= x
.uhigh ();
2011 /* The upper -COUNT bits of HIGH are not part of the value.
2013 high
= (high
<< -count
) >> -count
;
2014 else if (x
.sign_mask () < 0)
2015 /* The upper bit is set, so there are no leading zeros. */
2018 /* We don't need to look below HIGH. Either HIGH is nonzero,
2019 or the top bit of the block below is nonzero; clz_hwi is
2020 HOST_BITS_PER_WIDE_INT in the latter case. */
2021 return count
+ clz_hwi (high
);
2024 /* Return the number of redundant sign bits in X. (That is, the number
2025 of bits immediately below the sign bit that have the same value as
2028 wi::clrsb (const wide_int_ref
&x
)
2030 /* Calculate how many bits there above the highest represented block. */
2031 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2033 unsigned HOST_WIDE_INT high
= x
.uhigh ();
2034 unsigned HOST_WIDE_INT mask
= -1;
2037 /* The upper -COUNT bits of HIGH are not part of the value.
2038 Clear them from both MASK and HIGH. */
2043 /* If the top bit is 1, count the number of leading 1s. If the top
2044 bit is zero, count the number of leading zeros. */
2045 if (high
> mask
/ 2)
2048 /* There are no sign bits below the top block, so we don't need to look
2049 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2051 return count
+ clz_hwi (high
) - 1;
2054 /* Return the number of trailing (lower) zeros in X. */
2056 wi::ctz (const wide_int_ref
&x
)
2058 if (x
.len
== 1 && x
.ulow () == 0)
2061 /* Having dealt with the zero case, there must be a block with a
2062 nonzero bit. We don't care about the bits above the first 1. */
2064 while (x
.val
[i
] == 0)
2066 return i
* HOST_BITS_PER_WIDE_INT
+ ctz_hwi (x
.val
[i
]);
2069 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2072 wi::exact_log2 (const wide_int_ref
&x
)
2074 /* Reject cases where there are implicit -1 blocks above HIGH. */
2075 if (x
.len
* HOST_BITS_PER_WIDE_INT
< x
.precision
&& x
.sign_mask () < 0)
2078 /* Set CRUX to the index of the entry that should be nonzero.
2079 If the top block is zero then the next lowest block (if any)
2080 must have the high bit set. */
2081 unsigned int crux
= x
.len
- 1;
2082 if (crux
> 0 && x
.val
[crux
] == 0)
2085 /* Check that all lower blocks are zero. */
2086 for (unsigned int i
= 0; i
< crux
; ++i
)
2090 /* Get a zero-extended form of block CRUX. */
2091 unsigned HOST_WIDE_INT hwi
= x
.val
[crux
];
2092 if ((crux
+ 1) * HOST_BITS_PER_WIDE_INT
> x
.precision
)
2093 hwi
= zext_hwi (hwi
, x
.precision
% HOST_BITS_PER_WIDE_INT
);
2095 /* Now it's down to whether HWI is a power of 2. */
2096 int res
= ::exact_log2 (hwi
);
2098 res
+= crux
* HOST_BITS_PER_WIDE_INT
;
2102 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2104 wi::floor_log2 (const wide_int_ref
&x
)
2106 return x
.precision
- 1 - clz (x
);
2109 /* Return the index of the first (lowest) set bit in X, counting from 1.
2110 Return 0 if X is 0. */
2112 wi::ffs (const wide_int_ref
&x
)
2114 return eq_p (x
, 0) ? 0 : ctz (x
) + 1;
2117 /* Return true if sign-extending X to have precision PRECISION would give
2118 the minimum signed value at that precision. */
2120 wi::only_sign_bit_p (const wide_int_ref
&x
, unsigned int precision
)
2122 return ctz (x
) + 1 == int (precision
);
2125 /* Return true if X represents the minimum signed value. */
2127 wi::only_sign_bit_p (const wide_int_ref
&x
)
2129 return only_sign_bit_p (x
, x
.precision
);
2133 * Private utilities.
2136 void gt_ggc_mx (widest_int
*) { }
2137 void gt_pch_nx (widest_int
*, void (*) (void *, void *), void *) { }
2138 void gt_pch_nx (widest_int
*) { }
2140 template void wide_int::dump () const;
2141 template void generic_wide_int
<wide_int_ref_storage
<false> >::dump () const;
2142 template void generic_wide_int
<wide_int_ref_storage
<true> >::dump () const;
2143 template void offset_int::dump () const;
2144 template void widest_int::dump () const;