/* Operations with very long integers.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[1] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
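
/* Worked examples (illustrative only, assuming 64-bit HOST_WIDE_INTs):
   BLOCKS_NEEDED (128) is 2, BLOCKS_NEEDED (65) is 2, and
   BLOCKS_NEEDED (0) is defined as 1 so that every value owns at least
   one block.  SIGN_MASK yields -1 exactly when the block's top bit is
   set, e.g. SIGN_MASK (HOST_WIDE_INT_1U << 63) == -1.  */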
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != HOST_WIDE_INT_M1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
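
/* Worked example (illustrative only, precision 128 with 64-bit blocks):
   the pair { 5, 0 } canonizes to the single block { 5 }, because the
   upper block merely repeats the sign of block 0.  The pair { -1, 0 }
   (the unsigned value 0xffffffffffffffff) must keep both blocks, since
   dropping the zero block would turn the value into -1.  */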
/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val (0);
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
	t[i] = v[i];
      for (int i = 0; i < extra; i++)
	t[len + i] = -1;
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = CEIL (mpz_sizeinbase (x, 2), numb);
  HOST_WIDE_INT *val = res.write_val (0);
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));

  return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
	return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	{
	  val[len++] = 0;
	  return len;
	}
      return canonize (val, len, precision);
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
/* Byte swap the integer represented by XVAL and XLEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		 unsigned int xlen, unsigned int precision)
{
  unsigned int s, len = BLOCKS_NEEDED (precision);

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  memset (val, 0, sizeof (unsigned HOST_WIDE_INT) * len);

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  return canonize (val, len, precision);
}
/* Bitreverse the integer represented by XVAL and LEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		      unsigned int len, unsigned int precision)
{
  unsigned int i, s;

  for (i = 0; i < len; i++)
    val[i] = 0;

  for (s = 0; s < precision; s++)
    {
      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
      if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
	{
	  unsigned int d = (precision - 1) - s;
	  block = d / HOST_BITS_PER_WIDE_INT;
	  offset = d & (HOST_BITS_PER_WIDE_INT - 1);
	  val[block] |= HOST_WIDE_INT_1U << offset;
	}
    }

  return canonize (val, len, precision);
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  if (end >= prec)
    {
      /* case 111... */
      val[i++] = negate ? 0 : -1;
      return i;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/*
 * logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow
	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_UNDERFLOW;
	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_OVERFLOW;
	      else
		*overflow = wi::OVF_NONE;
	    }
	  else
	    *overflow = wi::OVF_NONE;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  else
	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
		     | ((unsigned HOST_WIDE_INT) input[i + 1]
			<< HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int shift;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    /* Unsigned overflow can only be +OVERFLOW.  */
	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		/* FIXME: Signed overflow type is not implemented yet.  */
		*overflow = OVF_UNKNOWN;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		/* Unsigned overflow can only be +OVERFLOW.  */
		*overflow = OVF_OVERFLOW;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
     WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
     result.  */

  unsigned HOST_HALF_WIDE_INT
    ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  const HOST_WIDE_INT mask
    = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
  unsigned HOST_HALF_WIDE_INT *u = ubuf;
  unsigned HOST_HALF_WIDE_INT *v = vbuf;
  unsigned HOST_HALF_WIDE_INT *r = rbuf;

  if (!high)
    prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
	= XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
      u = buf;
      v = u + half_blocks_needed;
      r = v + half_blocks_needed;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, UNSIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, UNSIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  if ((high || needs_overflow) && (shift = prec % HOST_BITS_PER_WIDE_INT) != 0)
    {
      /* The high or needs_overflow code assumes that the high bits
	 only appear from r[half_blocks_needed] up to
	 r[half_blocks_needed * 2 - 1].  If prec is not a multiple
	 of HOST_BITS_PER_WIDE_INT, shift the bits above prec up
	 to make that code simple.  */
      if (shift == HOST_BITS_PER_HALF_WIDE_INT)
	memmove (&r[half_blocks_needed], &r[half_blocks_needed - 1],
		 sizeof (r[0]) * half_blocks_needed);
      else
	{
	  unsigned int skip = shift < HOST_BITS_PER_HALF_WIDE_INT;
	  if (!skip)
	    shift -= HOST_BITS_PER_HALF_WIDE_INT;
	  for (i = 2 * half_blocks_needed - 1; i >= half_blocks_needed; i--)
	    r[i] = ((r[i - skip] << (-shift % HOST_BITS_PER_HALF_WIDE_INT))
		    | (r[i - skip - 1] >> shift));
	}
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[half_blocks_needed - 1
		  - ((-prec % HOST_BITS_PER_WIDE_INT)
		     >= HOST_BITS_PER_HALF_WIDE_INT)];
	  top = SIGN_MASK (((unsigned HOST_WIDE_INT) top)
			   << (HOST_BITS_PER_WIDE_INT / 2
			       + (-prec % HOST_BITS_PER_HALF_WIDE_INT)));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  /* FIXME: Signed overflow type is not implemented yet.  */
	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > o1)
		*overflow = OVF_UNDERFLOW;
	      else if (o0 < o1)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
	  else
	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.
   Return new value for N.  */
static int
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return 1;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }

  /* If N > M, the main loop was skipped, quotient will be 0 and
     we can't copy more than M half-limbs into the remainder, as they
     aren't present in b_dividend (which has M + 1 half-limbs).  */
  n = MIN (n, m);
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
  return n;
}
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     wi::overflow_type *oflow)
{
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow)
	*oflow = OVF_OVERFLOW;
      if (quotient)
	{
	  for (unsigned int i = 0; i < dividend_len; ++i)
	    quotient[i] = dividend_val[i];
	  return dividend_len;
	}
      return 0;
    }

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  unsigned HOST_HALF_WIDE_INT
    b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
		   / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
		    / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
		    / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
		  / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
  unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
  unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
  unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;

  if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
    dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
			 dividend_prec);
  if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
    divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
      || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
	= XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
		      3 * dividend_blocks_needed + 1
		      + divisor_blocks_needed);
      b_quotient = buf;
      b_remainder = b_quotient + dividend_blocks_needed;
      b_dividend = b_remainder + dividend_blocks_needed;
      b_divisor = b_dividend + dividend_blocks_needed + 1;
      memset (b_quotient, 0,
	      dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
    }
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, UNSIGNED);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, UNSIGNED);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  if (b_quotient == b_quotient_buf)
    memset (b_quotient_buf, 0, sizeof (b_quotient_buf));

  n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      SIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					SIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  len = MIN (xlen + skip + 1, len);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
/* Right shift XVAL by SHIFT and store the result in VAL.  LEN is the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static void
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int shift, unsigned int len)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = blocks_needed;
  if (len > xlen && xval[xlen - 1] >= 0)
    len = xlen;

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero. */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = MIN (xlen, blocks_needed);

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
                                  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  */
  return (val & mask) | (mask & lower_mask);
}
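/* Worked instance (illustrative only), using the 0x22/0x111 case from the
   selftests below: EXTRA_BITS is 0x22, so LOWER_MASK covers bits 0-5;
   (VAL & MASK) is 0 and (MASK & LOWER_MASK) is 0x11.

     wi::round_down_for_mask (wi::shwi (0x22, 18),
                              wi::shwi (0x111, 18));   // == 0x011
*/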
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
                                  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  return (val | tmp) & -tmp;
}
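/* Worked instance (illustrative only), again with the 0x22/0x111 case from
   the selftests below: EXTRA_BITS is 0x22, UPPER_MASK after intersecting
   with MASK is 0x100, TMP is 0x100, and (VAL | TMP) & -TMP keeps bit 8
   while clearing everything below it.

     wi::round_up_for_mask (wi::shwi (0x22, 18),
                            wi::shwi (0x111, 18));   // == 0x100
*/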
/* Compute the modular multiplicative inverse of A modulo B
   using extended Euclid's algorithm.  Assumes A and B are coprime,
   and that A and B have the same precision.  */

wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption.  */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  /* Work at one bit more than the operands' precision so that the
     intermediate signed values cannot overflow.  */
  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  while (wi::gt_p (c, 1, UNSIGNED))
    {
      /* One Euclidean step: (C, D) = (D, C mod D), with the Bezout
         coefficients updated to match.  */
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }

  /* X1 may be negative here; if so, add the modulus (extended to the
     working precision) to bring it into canonical range.  */
  if (wi::lt_p (x1, 0, SIGNED))
    x1 += wide_int::from (b, p, UNSIGNED);

  return x1;
}
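/* Usage sketch (illustrative only): 3 * 5 == 15 == 2 * 7 + 1, so 5 is the
   inverse of 3 modulo 7.  Both operands must have equal precision and be
   coprime.

     wide_int inv = wi::mod_inv (wi::shwi (3, 8), wi::shwi (7, 8));
     // inv == 5, at the widened internal precision of 9 bits
*/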
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases.  Besides, you can always
   call the dump method directly.  */
DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const wide_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const widest_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */
template <class VALUE_TYPE>
static VALUE_TYPE from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */
template <>
wide_int from_int (int i) { return wi::shwi (i, 32); }

/* Specialization for VALUE_TYPE == offset_int.  */
template <>
offset_int from_int (int i) { return offset_int (i); }

/* Specialization for VALUE_TYPE == widest_int.  */
template <>
widest_int from_int (int i) { return widest_int (i); }
/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_dec_buf_size (wi, sgn, &len))
    p = XALLOCAVEC (char, len);
  print_dec (wi, p, sgn);
  ASSERT_STREQ (expected, p);
}
/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_hex_buf_size (wi, &len))
    p = XALLOCAVEC (char, len);
  print_hex (wi, p);
  ASSERT_STREQ (expected, p);
}
/* Test cases.  */

/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_INL_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
                    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
                    wi::lshift (1, 129) + wi::lshift (1, 74)
                    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}
/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax and
   overloaded operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}
/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}
/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}
/* Test overflow conditions.  */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
        int prec = precs[i];
        int offset = offsets[j];
        wi::overflow_type overflow;
        wide_int sum, diff;

        sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

        sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED),
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED) - 1,
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, 1 - offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}
/* Test the round_{down,up}_for_mask functions.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
                                         wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
                                             wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
                                           wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
                                           wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
                                         wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
                                           wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
                                       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
                                       wi::shwi (0xabc, prec)));
}
/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
  ASSERT_EQ (wi::mask (128, false, 128),
             wi::shifted_mask (0, 128, false, 128));
  ASSERT_EQ (wi::mask (128, true, 128),
             wi::shifted_mask (0, 128, true, 128));
  ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
                                from_int <widest_int> (-128), UNSIGNED),
             false);
}

} // namespace selftest

#endif /* CHECKING_P */