/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based. See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
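/* Illustrative example (assuming the usual 64-bit HOST_WIDE_INT host):
   BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2 and
   BLOCKS_NEEDED (128) == 2, while BLOCKS_NEEDED (0) is defined to be 1
   so that even a zero-precision value owns one block.  SIGN_MASK simply
   smears the top bit of a block: SIGN_MASK (1) is 0, while SIGN_MASK of
   a block with its high bit set is -1.  */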
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL. */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block i does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
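/* Illustrative example (assuming 64-bit blocks): the 128-bit value 1 may
   arrive here as the two blocks { 1, 0 }; since the upper block is just
   the sign extension of the lower one, the canonical length is 1.
   Likewise { -1, -1 } canonizes to length 1, but { -1, 0 } must keep both
   blocks because the zero block is needed to show the value is positive.  */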
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
         won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
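/* Note on the mpz_import arguments used above: (-1,
   sizeof (HOST_WIDE_INT), 0, 0) asks GMP for least-significant-word-first
   order, host endianness within each word, and no nail bits, which
   matches the block layout used throughout this file.  */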
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
                             &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  res.set_len (canonize (val, count, prec));

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}
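/* Illustrative example: extending the 8-bit value 0xff to 16 bits gives
   0x00ff under UNSIGNED (the top stored block is zero-extended, or a zero
   block is appended, so the value reads as 255) and 0xffff, i.e. -1,
   under SIGNED.  */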
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));

  return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;

      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;

      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}
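/* Illustrative example: with 8-bit precision, a block holding 0xff
   compares as -1 under lts_p_large / cmps_large but as 255 under
   ltu_p_large / cmpu_large, which is why the signed and unsigned entry
   points cannot share one C++ operator.  */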
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
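/* Illustrative example: for a 32-bit value, bswap maps 0x11223344 to
   0x44332211; the byte at bit position s of the source lands at bit
   position precision - s - 8 of the result, which is exactly what the
   D computation above expresses.  */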
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
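/* Illustrative example (assuming 64-bit blocks): shifted_mask (val, 4, 8,
   false, 32) produces the single block 0xff0, i.e. bits 4..11 set, and the
   same call with NEGATE true produces the complement of that mask within
   the 32-bit precision.  */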
/*
 * Logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);

  HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
  HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);

  val[l0] = op0[l0] & op1[l0];

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);

  HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
  HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);

  val[l0] = op0[l0] & ~op1[l0];

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);

  HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
  HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);

  val[l0] = op0[l0] | op1[l0];

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);

  HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
  HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);

  val[l0] = op0[l0] | ~op1[l0];

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);

  HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
  val[l0] = op0[l0] ^ op1mask;

  HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
  val[l1] = op0mask ^ op1[l1];

  val[l0] = op0[l0] ^ op1[l0];

  return canonize (val, len, prec);
}
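/* Illustrative note on the masks used by the logical operations: when one
   operand is shorter than the other, its missing high blocks are implicit
   copies of its sign; -top_bit_of (op, len, prec) materialises exactly
   that implicit block (0 or -1), so e.g. XORing a 1-block -1 with a
   2-block value flips every bit of both blocks of the longer operand.  */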
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
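/* Illustrative note on the signed overflow test above: signed addition
   overflows exactly when both operands have the same sign and the sign of
   the result differs, i.e. when bit PREC - 1 of
   (result ^ o0) & (result ^ o1) is set; the left shift by
   -prec % HOST_BITS_PER_WIDE_INT moves that bit into the HWI sign bit so
   it can be tested with a single signed compare against zero.  */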
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
        | ((unsigned HOST_WIDE_INT)input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
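/* Illustrative example (assuming a 64-bit HOST_WIDE_INT): wi_unpack splits
   each 64-bit block into two 32-bit halves stored low half first, so the
   block 0x0000000100000002 becomes the half-blocks { 0x00000002,
   0x00000001 }; wi_pack performs the inverse pairing.  The multiply and
   divide routines below work entirely on these half-blocks so that a
   half * half product always fits in one HOST_WIDE_INT.  */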
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed.  */
  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
            {
              val[2] = 0;
              return 3;
            }
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          /* The estimated quotient digit was too large; add the divisor
             back and decrease the digit by one.  */
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }

  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
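/* Illustrative note: the working base here is b = 2^32 on a host with a
   64-bit HOST_WIDE_INT, i.e. each "digit" is a half block.  That keeps
   the two-digit numerator (b_dividend[j+n] * b + b_dividend[j+n-1]) used
   to estimate qhat inside a single unsigned HOST_WIDE_INT.  */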
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow || divisor == 0)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
        quotient[0] = o0 / o1;
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = 1;
        }
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
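/* Illustrative note: -small_shift % HOST_BITS_PER_WIDE_INT is just the
   complementary shift HOST_BITS_PER_WIDE_INT - small_shift written in a
   form that stays within [0, HOST_BITS_PER_WIDE_INT), so the expression
   never asks for an undefined shift by the full block width; the same
   idiom appears in the right-shift helper below.  */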
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero. */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
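/* Illustrative example (64-bit blocks assumed): for a 128-bit value of
   length 1 whose single block is 1, COUNT is 128 - 64 = 64 and
   clz_hwi (1) is 63, so clz correctly reports 127 leading zeros.  */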
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
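/* Illustrative example (64-bit blocks assumed): the value 2^72 is stored
   as the blocks { 0, 0x100 }; CRUX is 1, all lower blocks are zero, and
   ::exact_log2 (0x100) is 8, so the result is
   8 + 1 * HOST_BITS_PER_WIDE_INT == 72.  */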
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */
void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;