1 /* Operations with very long integers.
2 Copyright (C) 2012-2013 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
30 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
31 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__))
32 typedef unsigned HOST_HALF_WIDE_INT UHWtype
;
33 typedef unsigned HOST_WIDE_INT UWtype
;
34 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
35 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
36 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
38 typedef unsigned int UDWtype
__attribute__ ((mode (DI
)));
40 typedef unsigned int UDWtype
__attribute__ ((mode (TI
)));
45 static const HOST_WIDE_INT zeros
[WIDE_INT_MAX_ELTS
] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

/* The index of the HWI block that contains bit TARGET.  */
#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)

/* The number of HWI blocks required to hold PREC bits, rounding up;
   a precision of zero still occupies one block.  */
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)

/* -1 if the most significant bit of X is set, 0 otherwise.  */
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
60 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
61 based on the top existing bit of VAL. */
63 static unsigned HOST_WIDE_INT
64 safe_uhwi (const HOST_WIDE_INT
*val
, unsigned int len
, unsigned int i
)
66 return i
< len
? val
[i
] : val
[len
- 1] < 0 ? (HOST_WIDE_INT
) -1 : 0;
69 /* Convert the integer in VAL to canonical form, returning its new length.
70 LEN is the number of blocks currently in VAL and PRECISION is the number
71 of bits in the integer it represents.
73 This function only changes the representation, not the value. */
75 canonize (HOST_WIDE_INT
*val
, unsigned int len
, unsigned int precision
)
77 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
81 if (len
> blocks_needed
)
88 if (len
* HOST_BITS_PER_WIDE_INT
> precision
)
89 val
[len
- 1] = top
= sext_hwi (top
, precision
% HOST_BITS_PER_WIDE_INT
);
90 if (top
!= 0 && top
!= (HOST_WIDE_INT
)-1)
93 /* At this point we know that the top is either 0 or -1. Find the
94 first block that is not a copy of this. */
95 for (i
= len
- 2; i
>= 0; i
--)
97 HOST_WIDE_INT x
= val
[i
];
100 if (SIGN_MASK (x
) == top
)
103 /* We need an extra block because the top bit block i does
104 not match the extension. */
109 /* The number is 0 or -1. */
114 * Conversion routines in and out of wide_int.
117 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
118 result for an integer with precision PRECISION. Return the length
119 of VAL (after any canonization).  */
121 wi::from_array (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
122 unsigned int xlen
, unsigned int precision
, bool need_canon
)
124 for (unsigned i
= 0; i
< xlen
; i
++)
126 return need_canon
? canonize (val
, xlen
, precision
) : xlen
;
129 /* Construct a wide int from a buffer of length LEN. BUFFER will be
130 read according to byte endianness and word endianness of the target.
131 Only the lower BUFFER_LEN bytes of the result are set; the remaining
132 high bytes are cleared. */
134 wi::from_buffer (const unsigned char *buffer
, unsigned int buffer_len
)
136 unsigned int precision
= buffer_len
* BITS_PER_UNIT
;
137 wide_int result
= wide_int::create (precision
);
138 unsigned int words
= buffer_len
/ UNITS_PER_WORD
;
140 /* We have to clear all the bits ourself, as we merely or in values
142 unsigned int len
= BLOCKS_NEEDED (precision
);
143 HOST_WIDE_INT
*val
= result
.write_val ();
144 for (unsigned int i
= 0; i
< len
; ++i
)
147 for (unsigned int byte
= 0; byte
< buffer_len
; byte
++)
151 unsigned int bitpos
= byte
* BITS_PER_UNIT
;
152 unsigned HOST_WIDE_INT value
;
154 if (buffer_len
> UNITS_PER_WORD
)
156 unsigned int word
= byte
/ UNITS_PER_WORD
;
158 if (WORDS_BIG_ENDIAN
)
159 word
= (words
- 1) - word
;
161 offset
= word
* UNITS_PER_WORD
;
163 if (BYTES_BIG_ENDIAN
)
164 offset
+= (UNITS_PER_WORD
- 1) - (byte
% UNITS_PER_WORD
);
166 offset
+= byte
% UNITS_PER_WORD
;
169 offset
= BYTES_BIG_ENDIAN
? (buffer_len
- 1) - byte
: byte
;
171 value
= (unsigned HOST_WIDE_INT
) buffer
[offset
];
173 index
= bitpos
/ HOST_BITS_PER_WIDE_INT
;
174 val
[index
] |= value
<< (bitpos
% HOST_BITS_PER_WIDE_INT
);
177 result
.set_len (canonize (val
, len
, precision
));
182 /* Sets RESULT from X, the sign is taken according to SGN. */
184 wi::to_mpz (const wide_int_ref
&x
, mpz_t result
, signop sgn
)
186 int len
= x
.get_len ();
187 const HOST_WIDE_INT
*v
= x
.get_val ();
188 int excess
= len
* HOST_BITS_PER_WIDE_INT
- x
.get_precision ();
190 if (wi::neg_p (x
, sgn
))
192 /* We use ones complement to avoid -x80..0 edge case that -
194 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
195 for (int i
= 0; i
< len
; i
++)
198 t
[len
- 1] = (unsigned HOST_WIDE_INT
) t
[len
- 1] << excess
>> excess
;
199 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
200 mpz_com (result
, result
);
204 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
205 for (int i
= 0; i
< len
- 1; i
++)
207 t
[len
- 1] = (unsigned HOST_WIDE_INT
) v
[len
- 1] << excess
>> excess
;
208 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
211 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, v
);
214 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
215 values of VAL will be wrapped; otherwise, they will be set to the
216 appropriate minimum or maximum TYPE bound. */
218 wi::from_mpz (const_tree type
, mpz_t x
, bool wrap
)
221 int prec
= TYPE_PRECISION (type
);
222 wide_int res
= wide_int::create (prec
);
230 get_type_static_bounds (type
, min
, max
);
232 if (mpz_cmp (x
, min
) < 0)
234 else if (mpz_cmp (x
, max
) > 0)
241 /* Determine the number of unsigned HOST_WIDE_INTs that are required
242 for representing the value. The code to calculate count is
243 extracted from the GMP manual, section "Integer Import and Export":
244 http://gmplib.org/manual/Integer-Import-and-Export.html */
245 numb
= 8 * sizeof(HOST_WIDE_INT
);
246 count
= (mpz_sizeinbase (x
, 2) + numb
- 1) / numb
;
247 HOST_WIDE_INT
*val
= res
.write_val ();
248 mpz_export (val
, &count
, -1, sizeof (HOST_WIDE_INT
), 0, 0, x
);
263 * Largest and smallest values in a mode.
266 /* Return the largest SGNed number that is representable in PRECISION bits.
268 TODO: There is still code from the double_int era that tries to
269 make up for the fact that double int's could not represent the
270 min and max values of all types. This code should be removed
271 because the min and max values can always be represented in
272 wide_ints and int-csts. */
274 wi::max_value (unsigned int precision
, signop sgn
)
276 gcc_checking_assert (precision
!= 0);
278 /* The unsigned max is just all ones. */
279 return shwi (-1, precision
);
281 /* The signed max is all ones except the top bit. This must be
282 explicitly represented. */
283 return mask (precision
- 1, false, precision
);
286 /* Return the largest SGNed number that is representable in PRECISION bits. */
288 wi::min_value (unsigned int precision
, signop sgn
)
290 gcc_checking_assert (precision
!= 0);
292 return uhwi (0, precision
);
294 /* The signed min is all zeros except the top bit. This must be
295 explicitly represented. */
296 return wi::set_bit_in_zero (precision
- 1, precision
);
303 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
304 signedness SGN, to an integer that has PRECISION bits. Store the blocks
305 in VAL and return the number of blocks used.
307 This function can handle both extension (PRECISION > XPRECISION)
308 and truncation (PRECISION < XPRECISION). */
310 wi::force_to_size (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
311 unsigned int xlen
, unsigned int xprecision
,
312 unsigned int precision
, signop sgn
)
314 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
315 unsigned int len
= blocks_needed
< xlen
? blocks_needed
: xlen
;
316 for (unsigned i
= 0; i
< len
; i
++)
319 if (precision
> xprecision
)
321 unsigned int small_xprecision
= xprecision
% HOST_BITS_PER_WIDE_INT
;
326 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
327 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
328 else if (val
[len
- 1] < 0)
330 while (len
< BLOCKS_NEEDED (xprecision
))
332 if (small_xprecision
)
333 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
340 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
341 val
[len
- 1] = sext_hwi (val
[len
- 1], small_xprecision
);
344 len
= canonize (val
, len
, precision
);
349 /* This function hides the fact that we cannot rely on the bits beyond
350 the precision. This issue comes up in the relational comparisons
351 where we do allow comparisons of values of different precisions. */
352 static inline HOST_WIDE_INT
353 selt (const HOST_WIDE_INT
*a
, unsigned int len
,
354 unsigned int blocks_needed
, unsigned int small_prec
,
355 unsigned int index
, signop sgn
)
360 else if (index
< blocks_needed
|| sgn
== SIGNED
)
361 /* Signed or within the precision. */
362 val
= SIGN_MASK (a
[len
- 1]);
364 /* Unsigned extension beyond the precision. */
367 if (small_prec
&& index
== blocks_needed
- 1)
368 return (sgn
== SIGNED
369 ? sext_hwi (val
, small_prec
)
370 : zext_hwi (val
, small_prec
));
375 /* Find the highest bit represented in a wide int. This will in
376 general have the same value as the sign bit. */
377 static inline HOST_WIDE_INT
378 top_bit_of (const HOST_WIDE_INT
*a
, unsigned int len
, unsigned int prec
)
380 int excess
= len
* HOST_BITS_PER_WIDE_INT
- prec
;
381 unsigned HOST_WIDE_INT val
= a
[len
- 1];
384 return val
>> (HOST_BITS_PER_WIDE_INT
- 1);
388 * Comparisons, note that only equality is an operator. The other
389 * comparisons cannot be operators since they are inherently signed or
390 * unsigned and C++ has no such operators.
393 /* Return true if OP0 == OP1. */
395 wi::eq_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
396 const HOST_WIDE_INT
*op1
, unsigned int op1len
,
400 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
402 if (op0len
!= op1len
)
405 if (op0len
== BLOCKS_NEEDED (prec
) && small_prec
)
407 /* It does not matter if we zext or sext here, we just have to
408 do both the same way. */
409 if (zext_hwi (op0
[l0
], small_prec
) != zext_hwi (op1
[l0
], small_prec
))
415 if (op0
[l0
] != op1
[l0
])
423 /* Return true if OP0 < OP1 using signed comparisons. */
425 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
426 unsigned int precision
,
427 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
429 HOST_WIDE_INT s0
, s1
;
430 unsigned HOST_WIDE_INT u0
, u1
;
431 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
432 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
433 int l
= MAX (op0len
- 1, op1len
- 1);
435 /* Only the top block is compared as signed. The rest are unsigned
437 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
438 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
447 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
448 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
460 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
463 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
464 unsigned int precision
,
465 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
467 HOST_WIDE_INT s0
, s1
;
468 unsigned HOST_WIDE_INT u0
, u1
;
469 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
470 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
471 int l
= MAX (op0len
- 1, op1len
- 1);
473 /* Only the top block is compared as signed. The rest are unsigned
475 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
476 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
485 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
486 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
498 /* Return true if OP0 < OP1 using unsigned comparisons. */
500 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
501 unsigned int precision
,
502 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
504 unsigned HOST_WIDE_INT x0
;
505 unsigned HOST_WIDE_INT x1
;
506 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
507 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
508 int l
= MAX (op0len
- 1, op1len
- 1);
512 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
513 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
524 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
525 unsigned compares. */
527 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
528 unsigned int precision
,
529 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
531 unsigned HOST_WIDE_INT x0
;
532 unsigned HOST_WIDE_INT x1
;
533 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
534 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
535 int l
= MAX (op0len
- 1, op1len
- 1);
539 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
540 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
555 /* Sign-extend the number represented by XVAL and XLEN into VAL,
556 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
557 and VAL have PRECISION bits. */
559 wi::sext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
560 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
562 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
563 /* Extending beyond the precision is a no-op. If we have only stored
564 OFFSET bits or fewer, the rest are already signs. */
565 if (offset
>= precision
|| len
>= xlen
)
567 for (unsigned i
= 0; i
< xlen
; ++i
)
571 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
572 for (unsigned int i
= 0; i
< len
; i
++)
576 val
[len
] = sext_hwi (xval
[len
], suboffset
);
579 return canonize (val
, len
, precision
);
582 /* Zero-extend the number represented by XVAL and XLEN into VAL,
583 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
584 and VAL have PRECISION bits. */
586 wi::zext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
587 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
589 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
590 /* Extending beyond the precision is a no-op. If we have only stored
591 OFFSET bits or fewer, and the upper stored bit is zero, then there
593 if (offset
>= precision
|| (len
>= xlen
&& xval
[xlen
- 1] >= 0))
595 for (unsigned i
= 0; i
< xlen
; ++i
)
599 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
600 for (unsigned int i
= 0; i
< len
; i
++)
601 val
[i
] = i
< xlen
? xval
[i
] : -1;
603 val
[len
] = zext_hwi (len
< xlen
? xval
[len
] : -1, suboffset
);
606 return canonize (val
, len
+ 1, precision
);
610 * Masking, inserting, shifting, rotating.
613 /* Insert WIDTH bits from Y into X starting at START. */
615 wi::insert (const wide_int
&x
, const wide_int
&y
, unsigned int start
,
622 unsigned int precision
= x
.get_precision ();
623 if (start
>= precision
)
626 gcc_checking_assert (precision
>= width
);
628 if (start
+ width
>= precision
)
629 width
= precision
- start
;
631 mask
= wi::shifted_mask (start
, width
, false, precision
);
632 tmp
= wi::lshift (wide_int::from (y
, precision
, UNSIGNED
), start
);
635 tmp
= wi::bit_and_not (x
, mask
);
636 result
= result
| tmp
;
641 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
642 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
645 wi::set_bit_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
646 unsigned int xlen
, unsigned int precision
, unsigned int bit
)
648 unsigned int block
= bit
/ HOST_BITS_PER_WIDE_INT
;
649 unsigned int subbit
= bit
% HOST_BITS_PER_WIDE_INT
;
651 if (block
+ 1 >= xlen
)
653 /* The operation either affects the last current block or needs
655 unsigned int len
= block
+ 1;
656 for (unsigned int i
= 0; i
< len
; i
++)
657 val
[i
] = safe_uhwi (xval
, xlen
, i
);
658 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
660 /* If the bit we just set is at the msb of the block, make sure
661 that any higher bits are zeros. */
662 if (bit
+ 1 < precision
&& subbit
== HOST_BITS_PER_WIDE_INT
- 1)
668 for (unsigned int i
= 0; i
< xlen
; i
++)
670 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
671 return canonize (val
, xlen
, precision
);
677 wide_int_storage::bswap () const
679 wide_int result
= wide_int::create (precision
);
681 unsigned int len
= BLOCKS_NEEDED (precision
);
682 unsigned int xlen
= get_len ();
683 const HOST_WIDE_INT
*xval
= get_val ();
684 HOST_WIDE_INT
*val
= result
.write_val ();
686 /* This is not a well defined operation if the precision is not a
688 gcc_assert ((precision
& 0x7) == 0);
690 for (i
= 0; i
< len
; i
++)
693 /* Only swap the bytes that are not the padding. */
694 for (s
= 0; s
< precision
; s
+= 8)
696 unsigned int d
= precision
- s
- 8;
697 unsigned HOST_WIDE_INT byte
;
699 unsigned int block
= s
/ HOST_BITS_PER_WIDE_INT
;
700 unsigned int offset
= s
& (HOST_BITS_PER_WIDE_INT
- 1);
702 byte
= (safe_uhwi (xval
, xlen
, block
) >> offset
) & 0xff;
704 block
= d
/ HOST_BITS_PER_WIDE_INT
;
705 offset
= d
& (HOST_BITS_PER_WIDE_INT
- 1);
707 val
[block
] |= byte
<< offset
;
710 result
.set_len (canonize (val
, len
, precision
));
714 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
715 above that up to PREC are zeros. The result is inverted if NEGATE
716 is true. Return the number of blocks in VAL. */
718 wi::mask (HOST_WIDE_INT
*val
, unsigned int width
, bool negate
,
723 val
[0] = negate
? 0 : -1;
728 val
[0] = negate
? -1 : 0;
733 while (i
< width
/ HOST_BITS_PER_WIDE_INT
)
734 val
[i
++] = negate
? 0 : -1;
736 unsigned int shift
= width
& (HOST_BITS_PER_WIDE_INT
- 1);
739 HOST_WIDE_INT last
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
740 val
[i
++] = negate
? ~last
: last
;
743 val
[i
++] = negate
? -1 : 0;
748 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
749 bits are ones, and the bits above that up to PREC are zeros. The result
750 is inverted if NEGATE is true. Return the number of blocks in VAL. */
752 wi::shifted_mask (HOST_WIDE_INT
*val
, unsigned int start
, unsigned int width
,
753 bool negate
, unsigned int prec
)
755 if (start
>= prec
|| width
== 0)
757 val
[0] = negate
? -1 : 0;
761 if (width
> prec
- start
)
762 width
= prec
- start
;
763 unsigned int end
= start
+ width
;
766 while (i
< start
/ HOST_BITS_PER_WIDE_INT
)
767 val
[i
++] = negate
? -1 : 0;
769 unsigned int shift
= start
& (HOST_BITS_PER_WIDE_INT
- 1);
772 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
774 if (shift
< HOST_BITS_PER_WIDE_INT
)
777 block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - block
- 1;
778 val
[i
++] = negate
? ~block
: block
;
783 val
[i
++] = negate
? block
: ~block
;
786 while (i
< end
/ HOST_BITS_PER_WIDE_INT
)
788 val
[i
++] = negate
? 0 : -1;
790 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
794 HOST_WIDE_INT block
= ((unsigned HOST_WIDE_INT
) 1 << shift
) - 1;
795 val
[i
++] = negate
? ~block
: block
;
798 val
[i
++] = negate
? -1 : 0;
804 * logical operations.
807 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
809 wi::and_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
810 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
811 unsigned int op1len
, unsigned int prec
)
815 bool need_canon
= true;
817 unsigned int len
= MAX (op0len
, op1len
);
820 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
838 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
854 val
[l0
] = op0
[l0
] & op1
[l0
];
859 len
= canonize (val
, len
, prec
);
864 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
866 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
867 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
868 unsigned int op1len
, unsigned int prec
)
873 bool need_canon
= true;
875 unsigned int len
= MAX (op0len
, op1len
);
878 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
896 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
912 val
[l0
] = op0
[l0
] & ~op1
[l0
];
917 len
= canonize (val
, len
, prec
);
922 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
924 wi::or_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
925 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
926 unsigned int op1len
, unsigned int prec
)
931 bool need_canon
= true;
933 unsigned int len
= MAX (op0len
, op1len
);
936 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
954 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
970 val
[l0
] = op0
[l0
] | op1
[l0
];
975 len
= canonize (val
, len
, prec
);
980 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
982 wi::or_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
983 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
984 unsigned int op1len
, unsigned int prec
)
989 bool need_canon
= true;
991 unsigned int len
= MAX (op0len
, op1len
);
994 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1012 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1028 val
[l0
] = op0
[l0
] | ~op1
[l0
];
1033 len
= canonize (val
, len
, prec
);
1038 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1040 wi::xor_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1041 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1042 unsigned int op1len
, unsigned int prec
)
1045 int l0
= op0len
- 1;
1046 int l1
= op1len
- 1;
1048 unsigned int len
= MAX (op0len
, op1len
);
1051 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1054 val
[l0
] = op0
[l0
] ^ op1mask
;
1061 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1064 val
[l1
] = op0mask
^ op1
[l1
];
1071 val
[l0
] = op0
[l0
] ^ op1
[l0
];
1075 return canonize (val
, len
, prec
);
1082 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1083 whether the result overflows when OP0 and OP1 are treated as having
1084 signedness SGN. Return the number of blocks in VAL. */
1086 wi::add_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1087 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1088 unsigned int op1len
, unsigned int prec
,
1089 signop sgn
, bool *overflow
)
1091 unsigned HOST_WIDE_INT o0
= 0;
1092 unsigned HOST_WIDE_INT o1
= 0;
1093 unsigned HOST_WIDE_INT x
= 0;
1094 unsigned HOST_WIDE_INT carry
= 0;
1095 unsigned HOST_WIDE_INT old_carry
= 0;
1096 unsigned HOST_WIDE_INT mask0
, mask1
;
1099 unsigned int len
= MAX (op0len
, op1len
);
1100 mask0
= -top_bit_of (op0
, op0len
, prec
);
1101 mask1
= -top_bit_of (op1
, op1len
, prec
);
1102 /* Add all of the explicitly defined elements. */
1104 for (i
= 0; i
< len
; i
++)
1106 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
) op0
[i
] : mask0
;
1107 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
) op1
[i
] : mask1
;
1108 x
= o0
+ o1
+ carry
;
1111 carry
= carry
== 0 ? x
< o0
: x
<= o0
;
1114 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1116 val
[len
] = mask0
+ mask1
+ carry
;
1123 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1126 unsigned HOST_WIDE_INT x
= (val
[len
- 1] ^ o0
) & (val
[len
- 1] ^ o1
);
1127 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1131 /* Put the MSB of X and O0 and in the top of the HWI. */
1135 *overflow
= (x
<= o0
);
1137 *overflow
= (x
< o0
);
1141 return canonize (val
, len
, prec
);
1144 /* Subroutines of the multiplication and division operations. Unpack
1145 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1146 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1147 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1149 wi_unpack (unsigned HOST_HALF_WIDE_INT
*result
, const HOST_WIDE_INT
*input
,
1150 unsigned int in_len
, unsigned int out_len
,
1151 unsigned int prec
, signop sgn
)
1155 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
1156 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1161 mask
= -top_bit_of ((const HOST_WIDE_INT
*) input
, in_len
, prec
);
1162 mask
&= HALF_INT_MASK
;
1167 for (i
= 0; i
< blocks_needed
- 1; i
++)
1169 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1171 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1174 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1178 x
= sext_hwi (x
, small_prec
);
1180 x
= zext_hwi (x
, small_prec
);
1183 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1185 /* Smear the sign bit. */
1190 /* The inverse of wi_unpack.  IN_LEN is the number of input
1191 blocks. The number of output blocks will be half this amount. */
1193 wi_pack (unsigned HOST_WIDE_INT
*result
,
1194 const unsigned HOST_HALF_WIDE_INT
*input
,
1195 unsigned int in_len
)
1200 while (i
+ 2 < in_len
)
1202 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1203 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1]
1204 << HOST_BITS_PER_HALF_WIDE_INT
);
1208 /* Handle the case where in_len is odd. For this we zero extend. */
1210 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
];
1212 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1213 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1] << HOST_BITS_PER_HALF_WIDE_INT
);
1216 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1219 If HIGH is not set, throw away the upper half after the check is
1220 made to see if it overflows. Unfortunately there is no better way
1221 to check for overflow than to do this. If OVERFLOW is nonnull,
1222 record in *OVERFLOW whether the result overflowed. SGN controls
1223 the signedness and is used to check overflow or if HIGH is set. */
1225 wi::mul_internal (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op1val
,
1226 unsigned int op1len
, const HOST_WIDE_INT
*op2val
,
1227 unsigned int op2len
, unsigned int prec
, signop sgn
,
1228 bool *overflow
, bool high
)
1230 unsigned HOST_WIDE_INT o0
, o1
, k
, t
;
1233 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1234 unsigned int half_blocks_needed
= blocks_needed
* 2;
1235 /* The sizes here are scaled to support a 2x largest mode by 2x
1236 largest mode yielding a 4x largest mode result. This is what is
1239 unsigned HOST_HALF_WIDE_INT
1240 u
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1241 unsigned HOST_HALF_WIDE_INT
1242 v
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1243 /* The '2' in 'R' is because we are internally doing a full
1245 unsigned HOST_HALF_WIDE_INT
1246 r
[2 * 4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1247 HOST_WIDE_INT mask
= ((HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
) - 1;
1249 /* If the top level routine did not really pass in an overflow, then
1250 just make sure that we never attempt to set it. */
1251 bool needs_overflow
= (overflow
!= 0);
1255 wide_int_ref op1
= wi::storage_ref (op1val
, op1len
, prec
);
1256 wide_int_ref op2
= wi::storage_ref (op2val
, op2len
, prec
);
1258 /* This is a surprisingly common case, so do it first. */
1259 if (op1
== 0 || op2
== 0)
1266 if (sgn
== UNSIGNED
)
1268 /* If the inputs are single HWIs and the output has room for at
1269 least two HWIs, we can use umul_ppmm directly. */
1270 if (prec
>= HOST_BITS_PER_WIDE_INT
* 2
1271 && wi::fits_uhwi_p (op1
)
1272 && wi::fits_uhwi_p (op2
))
1274 umul_ppmm (val
[1], val
[0], op1
.ulow (), op2
.ulow ());
1275 return 1 + (val
[1] != 0 || val
[0] < 0);
1277 /* Likewise if the output is a full single HWI, except that the
1278 upper HWI of the result is only used for determining overflow.
1279 (We handle this case inline when overflow isn't needed.) */
1280 else if (prec
== HOST_BITS_PER_WIDE_INT
)
1282 unsigned HOST_WIDE_INT upper
;
1283 umul_ppmm (upper
, val
[0], op1
.ulow (), op2
.ulow ());
1285 *overflow
= (upper
!= 0);
1291 /* Handle multiplications by 1. */
1294 for (i
= 0; i
< op2len
; i
++)
1300 for (i
= 0; i
< op1len
; i
++)
1305 /* If we need to check for overflow, we can only do half wide
1306 multiplies quickly because we need to look at the top bits to
1307 check for the overflow. */
1308 if ((high
|| needs_overflow
)
1309 && (prec
<= HOST_BITS_PER_HALF_WIDE_INT
))
1311 unsigned HOST_WIDE_INT r
;
1315 o0
= op1
.to_shwi ();
1316 o1
= op2
.to_shwi ();
1320 o0
= op1
.to_uhwi ();
1321 o1
= op2
.to_uhwi ();
1329 if ((HOST_WIDE_INT
) r
!= sext_hwi (r
, prec
))
1334 if ((r
>> prec
) != 0)
1338 val
[0] = high
? r
>> prec
: r
;
1342 /* We do unsigned mul and then correct it. */
1343 wi_unpack (u
, op1val
, op1len
, half_blocks_needed
, prec
, SIGNED
);
1344 wi_unpack (v
, op2val
, op2len
, half_blocks_needed
, prec
, SIGNED
);
1346 /* The 2 is for a full mult. */
1347 memset (r
, 0, half_blocks_needed
* 2
1348 * HOST_BITS_PER_HALF_WIDE_INT
/ CHAR_BIT
);
1350 for (j
= 0; j
< half_blocks_needed
; j
++)
1353 for (i
= 0; i
< half_blocks_needed
; i
++)
1355 t
= ((unsigned HOST_WIDE_INT
)u
[i
] * (unsigned HOST_WIDE_INT
)v
[j
]
1357 r
[i
+ j
] = t
& HALF_INT_MASK
;
1358 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1360 r
[j
+ half_blocks_needed
] = k
;
1363 /* We did unsigned math above. For signed we must adjust the
1364 product (assuming we need to see that). */
1365 if (sgn
== SIGNED
&& (high
|| needs_overflow
))
1367 unsigned HOST_WIDE_INT b
;
1368 if (wi::neg_p (op1
))
1371 for (i
= 0; i
< half_blocks_needed
; i
++)
1373 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1374 - (unsigned HOST_WIDE_INT
)v
[i
] - b
;
1375 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1376 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1379 if (wi::neg_p (op2
))
1382 for (i
= 0; i
< half_blocks_needed
; i
++)
1384 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1385 - (unsigned HOST_WIDE_INT
)u
[i
] - b
;
1386 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1387 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1396 /* For unsigned, overflow is true if any of the top bits are set.
1397 For signed, overflow is true if any of the top bits are not equal
1399 if (sgn
== UNSIGNED
)
1403 top
= r
[(half_blocks_needed
) - 1];
1404 top
= SIGN_MASK (top
<< (HOST_BITS_PER_WIDE_INT
/ 2));
1408 for (i
= half_blocks_needed
; i
< half_blocks_needed
* 2; i
++)
1409 if (((HOST_WIDE_INT
)(r
[i
] & mask
)) != top
)
1415 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1416 wi_pack ((unsigned HOST_WIDE_INT
*) val
,
1417 &r
[half_blocks_needed
], half_blocks_needed
);
1418 return canonize (val
, blocks_needed
, prec
);
1422 /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
1423 wi_pack ((unsigned HOST_WIDE_INT
*) val
, r
, half_blocks_needed
);
1424 return canonize (val
, blocks_needed
, prec
);
1428 /* Compute the population count of X. */
1430 wi::popcount (const wide_int_ref
&x
)
1435 /* The high order block is special if it is the last block and the
1436 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1437 have to clear out any ones above the precision before doing
1438 popcount on this block. */
1439 count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1440 unsigned int stop
= x
.len
;
1443 count
= popcount_hwi (x
.uhigh () << -count
);
1448 if (x
.sign_mask () >= 0)
1452 for (i
= 0; i
< stop
; ++i
)
1453 count
+= popcount_hwi (x
.val
[i
]);
1458 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1459 whether the result overflows when OP0 and OP1 are treated as having
1460 signedness SGN. Return the number of blocks in VAL. */
1462 wi::sub_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1463 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1464 unsigned int op1len
, unsigned int prec
,
1465 signop sgn
, bool *overflow
)
1467 unsigned HOST_WIDE_INT o0
= 0;
1468 unsigned HOST_WIDE_INT o1
= 0;
1469 unsigned HOST_WIDE_INT x
= 0;
1470 /* We implement subtraction as an in place negate and add. Negation
1471 is just inversion and add 1, so we can do the add of 1 by just
1472 starting the borrow in of the first element at 1. */
1473 unsigned HOST_WIDE_INT borrow
= 0;
1474 unsigned HOST_WIDE_INT old_borrow
= 0;
1476 unsigned HOST_WIDE_INT mask0
, mask1
;
1479 unsigned int len
= MAX (op0len
, op1len
);
1480 mask0
= -top_bit_of (op0
, op0len
, prec
);
1481 mask1
= -top_bit_of (op1
, op1len
, prec
);
1483 /* Subtract all of the explicitly defined elements. */
1484 for (i
= 0; i
< len
; i
++)
1486 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
)op0
[i
] : mask0
;
1487 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
)op1
[i
] : mask1
;
1488 x
= o0
- o1
- borrow
;
1490 old_borrow
= borrow
;
1491 borrow
= borrow
== 0 ? o0
< o1
: o0
<= o1
;
1494 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1496 val
[len
] = mask0
- mask1
- borrow
;
1503 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1506 unsigned HOST_WIDE_INT x
= (o0
^ o1
) & (val
[len
- 1] ^ o0
);
1507 *overflow
= (HOST_WIDE_INT
) (x
<< shift
) < 0;
1511 /* Put the MSB of X and O0 and in the top of the HWI. */
1515 *overflow
= (x
>= o0
);
1517 *overflow
= (x
> o0
);
1521 return canonize (val
, len
, prec
);
1529 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1530 algorithm is a small modification of the algorithm in Hacker's
1531 Delight by Warren, which itself is a small modification of Knuth's
1532 algorithm. M is the number of significant elements of U however
1533 there needs to be at least one extra element of B_DIVIDEND
1534 allocated, N is the number of elements of B_DIVISOR. */
1536 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT
*b_quotient
,
1537 unsigned HOST_HALF_WIDE_INT
*b_remainder
,
1538 unsigned HOST_HALF_WIDE_INT
*b_dividend
,
1539 unsigned HOST_HALF_WIDE_INT
*b_divisor
,
1542 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1543 HOST_WIDE_INT and stored in the lower bits of each word. This
1544 algorithm should work properly on both 32 and 64 bit
1546 unsigned HOST_WIDE_INT b
1547 = (unsigned HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
;
1548 unsigned HOST_WIDE_INT qhat
; /* Estimate of quotient digit. */
1549 unsigned HOST_WIDE_INT rhat
; /* A remainder. */
1550 unsigned HOST_WIDE_INT p
; /* Product of two digits. */
1554 /* Single digit divisor. */
1558 for (j
= m
- 1; j
>= 0; j
--)
1560 b_quotient
[j
] = (k
* b
+ b_dividend
[j
])/b_divisor
[0];
1561 k
= ((k
* b
+ b_dividend
[j
])
1562 - ((unsigned HOST_WIDE_INT
)b_quotient
[j
]
1563 * (unsigned HOST_WIDE_INT
)b_divisor
[0]));
1569 s
= clz_hwi (b_divisor
[n
-1]) - HOST_BITS_PER_HALF_WIDE_INT
; /* CHECK clz */
1573 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1574 algorithm, we can overwrite b_dividend and b_divisor, so we do
1576 for (i
= n
- 1; i
> 0; i
--)
1577 b_divisor
[i
] = (b_divisor
[i
] << s
)
1578 | (b_divisor
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1579 b_divisor
[0] = b_divisor
[0] << s
;
1581 b_dividend
[m
] = b_dividend
[m
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
);
1582 for (i
= m
- 1; i
> 0; i
--)
1583 b_dividend
[i
] = (b_dividend
[i
] << s
)
1584 | (b_dividend
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1585 b_dividend
[0] = b_dividend
[0] << s
;
1589 for (j
= m
- n
; j
>= 0; j
--)
1591 qhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) / b_divisor
[n
-1];
1592 rhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) - qhat
* b_divisor
[n
-1];
1594 if (qhat
>= b
|| qhat
* b_divisor
[n
-2] > b
* rhat
+ b_dividend
[j
+n
-2])
1597 rhat
+= b_divisor
[n
-1];
1602 /* Multiply and subtract. */
1604 for (i
= 0; i
< n
; i
++)
1606 p
= qhat
* b_divisor
[i
];
1607 t
= b_dividend
[i
+j
] - k
- (p
& HALF_INT_MASK
);
1608 b_dividend
[i
+ j
] = t
;
1609 k
= ((p
>> HOST_BITS_PER_HALF_WIDE_INT
)
1610 - (t
>> HOST_BITS_PER_HALF_WIDE_INT
));
1612 t
= b_dividend
[j
+n
] - k
;
1613 b_dividend
[j
+n
] = t
;
1615 b_quotient
[j
] = qhat
;
1620 for (i
= 0; i
< n
; i
++)
1622 t
= (HOST_WIDE_INT
)b_dividend
[i
+j
] + b_divisor
[i
] + k
;
1623 b_dividend
[i
+j
] = t
;
1624 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1626 b_dividend
[j
+n
] += k
;
1630 for (i
= 0; i
< n
; i
++)
1631 b_remainder
[i
] = (b_dividend
[i
] >> s
)
1632 | (b_dividend
[i
+1] << (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1634 for (i
= 0; i
< n
; i
++)
1635 b_remainder
[i
] = b_dividend
[i
];
1639 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1640 the result. If QUOTIENT is nonnull, store the value of the quotient
1641 there and return the number of blocks in it. The return value is
1642 not defined otherwise. If REMAINDER is nonnull, store the value
1643 of the remainder there and store the number of blocks in
1644 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1645 the division overflowed. */
1647 wi::divmod_internal (HOST_WIDE_INT
*quotient
, unsigned int *remainder_len
,
1648 HOST_WIDE_INT
*remainder
,
1649 const HOST_WIDE_INT
*dividend_val
,
1650 unsigned int dividend_len
, unsigned int dividend_prec
,
1651 const HOST_WIDE_INT
*divisor_val
, unsigned int divisor_len
,
1652 unsigned int divisor_prec
, signop sgn
,
1655 unsigned int dividend_blocks_needed
= 2 * BLOCKS_NEEDED (dividend_prec
);
1656 unsigned int divisor_blocks_needed
= 2 * BLOCKS_NEEDED (divisor_prec
);
1657 unsigned HOST_HALF_WIDE_INT
1658 b_quotient
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1659 unsigned HOST_HALF_WIDE_INT
1660 b_remainder
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1661 unsigned HOST_HALF_WIDE_INT
1662 b_dividend
[(4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
) + 1];
1663 unsigned HOST_HALF_WIDE_INT
1664 b_divisor
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1666 bool dividend_neg
= false;
1667 bool divisor_neg
= false;
1668 bool overflow
= false;
1669 wide_int neg_dividend
, neg_divisor
;
1671 wide_int_ref dividend
= wi::storage_ref (dividend_val
, dividend_len
,
1673 wide_int_ref divisor
= wi::storage_ref (divisor_val
, divisor_len
,
1678 /* The smallest signed number / -1 causes overflow. The dividend_len
1679 check is for speed rather than correctness. */
1681 && dividend_len
== BLOCKS_NEEDED (dividend_prec
)
1683 && wi::only_sign_bit_p (dividend
))
1686 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1687 (signed min / -1) has the same representation as the orignal dividend.
1688 We have traditionally made division by zero act as division by one,
1689 so there too we use the original dividend. */
1700 for (unsigned int i
= 0; i
< dividend_len
; ++i
)
1701 quotient
[i
] = dividend_val
[i
];
1702 return dividend_len
;
1708 /* Do it on the host if you can. */
1710 && wi::fits_shwi_p (dividend
)
1711 && wi::fits_shwi_p (divisor
))
1713 HOST_WIDE_INT o0
= dividend
.to_shwi ();
1714 HOST_WIDE_INT o1
= divisor
.to_shwi ();
1716 if (o0
== HOST_WIDE_INT_MIN
&& o1
== -1)
1718 gcc_checking_assert (dividend_prec
> HOST_BITS_PER_WIDE_INT
);
1721 quotient
[0] = HOST_WIDE_INT_MIN
;
1734 quotient
[0] = o0
/ o1
;
1737 remainder
[0] = o0
% o1
;
1745 && wi::fits_uhwi_p (dividend
)
1746 && wi::fits_uhwi_p (divisor
))
1748 unsigned HOST_WIDE_INT o0
= dividend
.to_uhwi ();
1749 unsigned HOST_WIDE_INT o1
= divisor
.to_uhwi ();
1752 quotient
[0] = o0
/ o1
;
1755 remainder
[0] = o0
% o1
;
1761 /* Make the divisor and dividend positive and remember what we
1765 if (wi::neg_p (dividend
))
1767 neg_dividend
= -dividend
;
1768 dividend
= neg_dividend
;
1769 dividend_neg
= true;
1771 if (wi::neg_p (divisor
))
1773 neg_divisor
= -divisor
;
1774 divisor
= neg_divisor
;
1779 wi_unpack (b_dividend
, dividend
.get_val (), dividend
.get_len (),
1780 dividend_blocks_needed
, dividend_prec
, sgn
);
1781 wi_unpack (b_divisor
, divisor
.get_val (), divisor
.get_len (),
1782 divisor_blocks_needed
, divisor_prec
, sgn
);
1784 m
= dividend_blocks_needed
;
1785 while (m
> 1 && b_dividend
[m
- 1] == 0)
1788 n
= divisor_blocks_needed
;
1789 while (n
> 1 && b_divisor
[n
- 1] == 0)
1792 memset (b_quotient
, 0, sizeof (b_quotient
));
1794 divmod_internal_2 (b_quotient
, b_remainder
, b_dividend
, b_divisor
, m
, n
);
1796 unsigned int quotient_len
= 0;
1799 wi_pack ((unsigned HOST_WIDE_INT
*) quotient
, b_quotient
, m
);
1800 quotient_len
= canonize (quotient
, (m
+ 1) / 2, dividend_prec
);
1801 /* The quotient is neg if exactly one of the divisor or dividend is
1803 if (dividend_neg
!= divisor_neg
)
1804 quotient_len
= wi::sub_large (quotient
, zeros
, 1, quotient
,
1805 quotient_len
, dividend_prec
,
1811 wi_pack ((unsigned HOST_WIDE_INT
*) remainder
, b_remainder
, n
);
1812 *remainder_len
= canonize (remainder
, (n
+ 1) / 2, dividend_prec
);
1813 /* The remainder is always the same sign as the dividend. */
1815 *remainder_len
= wi::sub_large (remainder
, zeros
, 1, remainder
,
1816 *remainder_len
, dividend_prec
,
1820 return quotient_len
;
1824 * Shifting, rotating and extraction.
1827 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1828 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1830 wi::lshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1831 unsigned int xlen
, unsigned int precision
,
1834 /* Split the shift into a whole-block shift and a subblock shift. */
1835 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1836 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1838 /* The whole-block shift fills with zeros. */
1839 unsigned int len
= BLOCKS_NEEDED (precision
);
1840 for (unsigned int i
= 0; i
< skip
; ++i
)
1843 /* It's easier to handle the simple block case specially. */
1844 if (small_shift
== 0)
1845 for (unsigned int i
= skip
; i
< len
; ++i
)
1846 val
[i
] = safe_uhwi (xval
, xlen
, i
- skip
);
1849 /* The first unfilled output block is a left shift of the first
1850 block in XVAL. The other output blocks contain bits from two
1851 consecutive input blocks. */
1852 unsigned HOST_WIDE_INT carry
= 0;
1853 for (unsigned int i
= skip
; i
< len
; ++i
)
1855 unsigned HOST_WIDE_INT x
= safe_uhwi (xval
, xlen
, i
- skip
);
1856 val
[i
] = (x
<< small_shift
) | carry
;
1857 carry
= x
>> (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1860 return canonize (val
, len
, precision
);
1863 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1864 number of blocks in VAL. The input has XPRECISION bits and the
1865 output has XPRECISION - SHIFT bits. */
1867 rshift_large_common (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1868 unsigned int xlen
, unsigned int xprecision
,
1871 /* Split the shift into a whole-block shift and a subblock shift. */
1872 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1873 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1875 /* Work out how many blocks are needed to store the significant bits
1876 (excluding the upper zeros or signs). */
1877 unsigned int len
= BLOCKS_NEEDED (xprecision
- shift
);
1879 /* It's easier to handle the simple block case specially. */
1880 if (small_shift
== 0)
1881 for (unsigned int i
= 0; i
< len
; ++i
)
1882 val
[i
] = safe_uhwi (xval
, xlen
, i
+ skip
);
1885 /* Each output block but the last is a combination of two input blocks.
1886 The last block is a right shift of the last block in XVAL. */
1887 unsigned HOST_WIDE_INT curr
= safe_uhwi (xval
, xlen
, skip
);
1888 for (unsigned int i
= 0; i
< len
; ++i
)
1890 val
[i
] = curr
>> small_shift
;
1891 curr
= safe_uhwi (xval
, xlen
, i
+ skip
+ 1);
1892 val
[i
] |= curr
<< (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1898 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1899 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1900 VAL has PRECISION bits. */
1902 wi::lrshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1903 unsigned int xlen
, unsigned int xprecision
,
1904 unsigned int precision
, unsigned int shift
)
1906 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1908 /* The value we just created has precision XPRECISION - SHIFT.
1909 Zero-extend it to wider precisions. */
1910 if (precision
> xprecision
- shift
)
1912 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1914 val
[len
- 1] = zext_hwi (val
[len
- 1], small_prec
);
1915 else if (val
[len
- 1] < 0)
1917 /* Add a new block with a zero. */
1922 return canonize (val
, len
, precision
);
1925 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1926 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1927 VAL has PRECISION bits. */
1929 wi::arshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1930 unsigned int xlen
, unsigned int xprecision
,
1931 unsigned int precision
, unsigned int shift
)
1933 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1935 /* The value we just created has precision XPRECISION - SHIFT.
1936 Sign-extend it to wider types. */
1937 if (precision
> xprecision
- shift
)
1939 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1941 val
[len
- 1] = sext_hwi (val
[len
- 1], small_prec
);
1943 return canonize (val
, len
, precision
);
1946 /* Return the number of leading (upper) zeros in X. */
1948 wi::clz (const wide_int_ref
&x
)
1950 /* Calculate how many bits there above the highest represented block. */
1951 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1953 unsigned HOST_WIDE_INT high
= x
.uhigh ();
1955 /* The upper -COUNT bits of HIGH are not part of the value.
1957 high
= (high
<< -count
) >> -count
;
1958 else if (x
.sign_mask () < 0)
1959 /* The upper bit is set, so there are no leading zeros. */
1962 /* We don't need to look below HIGH. Either HIGH is nonzero,
1963 or the top bit of the block below is nonzero; clz_hwi is
1964 HOST_BITS_PER_WIDE_INT in the latter case. */
1965 return count
+ clz_hwi (high
);
1968 /* Return the number of redundant sign bits in X. (That is, the number
1969 of bits immediately below the sign bit that have the same value as
1972 wi::clrsb (const wide_int_ref
&x
)
1974 /* Calculate how many bits there above the highest represented block. */
1975 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1977 unsigned HOST_WIDE_INT high
= x
.uhigh ();
1978 unsigned HOST_WIDE_INT mask
= -1;
1981 /* The upper -COUNT bits of HIGH are not part of the value.
1982 Clear them from both MASK and HIGH. */
1987 /* If the top bit is 1, count the number of leading 1s. If the top
1988 bit is zero, count the number of leading zeros. */
1989 if (high
> mask
/ 2)
1992 /* There are no sign bits below the top block, so we don't need to look
1993 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
1995 return count
+ clz_hwi (high
) - 1;
1998 /* Return the number of trailing (lower) zeros in X. */
2000 wi::ctz (const wide_int_ref
&x
)
2002 if (x
.len
== 1 && x
.ulow () == 0)
2005 /* Having dealt with the zero case, there must be a block with a
2006 nonzero bit. We don't care about the bits above the first 1. */
2008 while (x
.val
[i
] == 0)
2010 return i
* HOST_BITS_PER_WIDE_INT
+ ctz_hwi (x
.val
[i
]);
2013 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2016 wi::exact_log2 (const wide_int_ref
&x
)
2018 /* Reject cases where there are implicit -1 blocks above HIGH. */
2019 if (x
.len
* HOST_BITS_PER_WIDE_INT
< x
.precision
&& x
.sign_mask () < 0)
2022 /* Set CRUX to the index of the entry that should be nonzero.
2023 If the top block is zero then the next lowest block (if any)
2024 must have the high bit set. */
2025 unsigned int crux
= x
.len
- 1;
2026 if (crux
> 0 && x
.val
[crux
] == 0)
2029 /* Check that all lower blocks are zero. */
2030 for (unsigned int i
= 0; i
< crux
; ++i
)
2034 /* Get a zero-extended form of block CRUX. */
2035 unsigned HOST_WIDE_INT hwi
= x
.val
[crux
];
2036 if ((crux
+ 1) * HOST_BITS_PER_WIDE_INT
> x
.precision
)
2037 hwi
= zext_hwi (hwi
, x
.precision
% HOST_BITS_PER_WIDE_INT
);
2039 /* Now it's down to whether HWI is a power of 2. */
2040 int res
= ::exact_log2 (hwi
);
2042 res
+= crux
* HOST_BITS_PER_WIDE_INT
;
2046 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2048 wi::floor_log2 (const wide_int_ref
&x
)
2050 return x
.precision
- 1 - clz (x
);
2053 /* Return the index of the first (lowest) set bit in X, counting from 1.
2054 Return 0 if X is 0. */
2056 wi::ffs (const wide_int_ref
&x
)
2058 return eq_p (x
, 0) ? 0 : ctz (x
) + 1;
2061 /* Return true if sign-extending X to have precision PRECISION would give
2062 the minimum signed value at that precision. */
2064 wi::only_sign_bit_p (const wide_int_ref
&x
, unsigned int precision
)
2066 return ctz (x
) + 1 == int (precision
);
2069 /* Return true if X represents the minimum signed value. */
2071 wi::only_sign_bit_p (const wide_int_ref
&x
)
2073 return only_sign_bit_p (x
, x
.precision
);
2077 * Private utilities.
2080 void gt_ggc_mx (widest_int
*) { }
2081 void gt_pch_nx (widest_int
*, void (*) (void *, void *), void *) { }
2082 void gt_pch_nx (widest_int
*) { }
2084 template void wide_int::dump () const;
2085 template void generic_wide_int
<wide_int_ref_storage
<false> >::dump () const;
2086 template void generic_wide_int
<wide_int_ref_storage
<true> >::dump () const;
2087 template void offset_int::dump () const;
2088 template void widest_int::dump () const;