/* Operations with very long integers.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
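/* As an example, assuming a 64-bit HOST_WIDE_INT: BLOCKS_NEEDED (64) is 1,
   BLOCKS_NEEDED (65) is 2, and BLOCKS_NEEDED (0) is defined to be 1, so
   even a zero-precision integer owns one block.  SIGN_MASK produces the
   infinite sign extension of its operand's top bit: SIGN_MASK of any HWI
   with the top bit set is -1, otherwise 0.  */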
/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block I does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
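/* As an example of canonization, assuming a 64-bit HOST_WIDE_INT and
   PRECISION 128: the two-block array { -1, -1 } canonizes to length 1,
   since the upper block is just the sign extension of the lower one,
   while { -1, 0 } (the value 2^64 - 1) must keep both blocks, because
   dropping the zero block would turn it into -1.  */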
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
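/* As an example, assuming a little-endian target with 64-bit words:
   from_buffer ("\x01\x02", 2) yields the 16-bit value 0x0201, since
   byte 0 lands in bits 0-7 and byte 1 in bits 8-15; on a big-endian
   target the same buffer yields 0x0102.  */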
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
         won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
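/* Why ones complement above: for the most negative value, e.g. 0x80..0,
   the magnitude of -X is not representable, so -X cannot be formed
   directly.  ~X (which equals -X - 1) always fits; it is imported as a
   nonnegative number, and the final mpz_com, which computes -T - 1,
   turns it back into X.  */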
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof(HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned min is just all zeros.  */
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
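/* Concrete values for PRECISION 8: max_value (8, UNSIGNED) is 0xff,
   max_value (8, SIGNED) is 0x7f, min_value (8, UNSIGNED) is 0 and
   min_value (8, SIGNED) is -0x80.  */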
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
        return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}
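/* As an example of how selt drives these comparisons, take an 8-bit
   precision stored in one 64-bit block: 0xff sign-extends to -1 and
   zero-extends to 255, so lts_p_large reports 0xff < 0x01 while
   ltu_p_large reports 0x01 < 0xff.  */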
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
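/* As an example with 16-bit precision: sign-extending 0x0080 from
   OFFSET 8 gives 0xff80, and zero-extending 0xff80 from OFFSET 8 gives
   0x0080; only the bits below OFFSET are kept and everything above
   them is replaced by the extension.  */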
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
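/* As an example with 16-bit values: inserting the 4-bit value 0xa into
   0x0000 at START 4 yields 0x00a0; the shifted mask selects bits 4-7,
   Y is shifted into place, and the remaining bits of X survive through
   the bit_and_not.  */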
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
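/* As examples with PREC 64: mask (8, false, 64) is the low-byte mask
   0xff, and shifted_mask (4, 8, false, 64) is 0xff0, i.e. eight ones
   starting at bit 4; with NEGATE set each result is inverted.  */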
/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... OP0's blocks above OP1's length are combined with OP1's
         sign extension here, adjusting LEN, L0 and NEED_CANON ... */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... and the mirror image for OP1's blocks above OP0's
         length ... */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
877 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
878 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
879 unsigned int op1len
, unsigned int prec
)
884 bool need_canon
= true;
886 unsigned int len
= MAX (op0len
, op1len
);
889 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
907 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
923 val
[l0
] = op0
[l0
] & ~op1
[l0
];
928 len
= canonize (val
, len
, prec
);
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... OP0's blocks above OP1's length are combined with OP1's
         sign extension here, adjusting LEN, L0 and NEED_CANON ... */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... and the mirror image for OP1's blocks above OP0's
         length ... */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... OP0's blocks above OP1's length are combined with the
         complement of OP1's sign extension here, adjusting LEN, L0
         and NEED_CANON ... */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... and the mirror image for OP1's blocks above OP0's
         length ... */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
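/* The pattern shared by the logical operations above: when one operand
   has fewer blocks than the other, its missing high blocks are
   implicitly copies of its sign mask (-top_bit_of), so the loops
   combine explicit blocks with that mask rather than materializing the
   extension.  For instance, for 128-bit operands { 5 } & { -1, -1 }
   performs a single explicit AND; the shorter operand extends with
   zeros, so the high result blocks are known to be zero.  */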
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
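/* The signed overflow test above relies on the rule that addition
   overflows iff both operands have the same sign and the result's sign
   differs, i.e. iff the top bit of (RESULT ^ O0) & (RESULT ^ O1) is
   set.  SHIFT moves the sign bit of a sub-HWI precision (bit 31 for
   PREC 32, say) up to the HWI's own sign bit so that a single < 0
   test works.  */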
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
        | ((unsigned HOST_WIDE_INT)input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
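/* As an example, assuming a 64-bit HOST_WIDE_INT with 32-bit halves:
   wi_unpack splits the block 0x0000000100000002 into the half-blocks
   { 0x2, 0x1 } (low half first) and smears the sign into any remaining
   output halves; wi_pack reassembles { 0x2, 0x1 } back into
   0x0000000100000002.  This half-width form is what the multiply and
   divide routines below operate on.  */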
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
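/* The loops above are schoolbook long multiplication in base
   2^HOST_BITS_PER_HALF_WIDE_INT: each half-block digit of U is
   multiplied by each digit of V, the low half of the product T is
   accumulated into R[I + J] and the high half becomes the carry K,
   just as in decimal long multiplication.  The later subtraction of U
   or V from the top half of R is the standard correction that turns an
   unsigned product into a signed one when an operand was negative.  */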
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }

  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
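/* A note on the algorithm above, in base b = 2^HOST_BITS_PER_HALF_WIDE_INT:
   each quotient digit is first estimated from the top two dividend
   digits as QHAT = (u[j+n]*b + u[j+n-1]) / v[n-1].  After the
   normalization shift S, Knuth shows the estimate exceeds the true
   digit by at most 2, and the add-back step for a negative
   multiply-subtract result is needed only with probability about
   2/b.  */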
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);

  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
        quotient[0] = o0 / o1;
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = 1;
        }
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
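/* An end-to-end signed example: dividing -7 by 2 negates the dividend,
   computes 7 / 2 = 3 remainder 1 in half-blocks, then negates the
   quotient (the signs differ) and the remainder (which follows the
   dividend), yielding -3 and -1, as truncating division requires.  */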
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
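/* A note on the carry computation above: shifting an HWI by its full
   width is undefined in C++, so the code shifts right by
   -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT (e.g. 61 when SMALL_SHIFT is 3
   on a 64-bit HWI).  SMALL_SHIFT is known to be nonzero on this path,
   so the expression never degenerates into a full-width shift.  */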
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero.  */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */
void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;