/* Operations with very long integers.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif
39 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 typedef unsigned HOST_HALF_WIDE_INT UHWtype
;
42 typedef unsigned HOST_WIDE_INT UWtype
;
43 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
44 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
45 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
47 typedef unsigned int UDWtype
__attribute__ ((mode (DI
)));
49 typedef unsigned int UDWtype
__attribute__ ((mode (TI
)));
static const HOST_WIDE_INT zeros[1] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
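/* As an illustration (assuming 64-bit HWIs): BLOCKS_NEEDED (64) == 1,
   BLOCKS_NEEDED (65) == 2 and BLOCKS_NEEDED (0) == 1, while
   SIGN_MASK (X) is -1 for any X with the top bit set and 0 otherwise.  */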
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != HOST_WIDE_INT_M1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit block i does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
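/* A worked example of the canonical form (assuming 64-bit HWIs, at
   precision 128): the value -1 is stored as len 1, val = { -1 }, since
   the missing high block is implied by the sign of val[0].  The value
   2^64 - 1 must instead be stored as len 2, val = { -1, 0 }: dropping
   the explicit zero block would change the implied extension to all
   ones.  canonize only ever moves between such representations; it
   never changes the value.  */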
/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely OR in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val (0);
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
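/* A small illustration (assuming a little-endian target and
   buffer_len <= UNITS_PER_WORD): the four bytes { 0x78, 0x56, 0x34,
   0x12 } produce a precision-32 value of 0x12345678, since byte I is
   OR'd in at bit position 8 * I.  */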
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
         won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
        t[i] = v[i];
      for (int i = 0; i < extra; i++)
        t[len + i] = -1;
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
        t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
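/* The ones' complement trick above in a nutshell: mpz_import only
   reads unsigned magnitudes, and negating the most negative value
   would overflow, so for negative X we import ~X (which is always
   representable) and then take the GMP-side complement.  For example,
   X == -1 imports as 0, and mpz_com turns that 0 back into -1.  */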
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = CEIL (mpz_sizeinbase (x, 2), numb);
  HOST_WIDE_INT *val = res.write_val (0);
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
                             &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double int's could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
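/* For example, at precision 8: max_value (8, SIGNED) is 127 and
   min_value (8, SIGNED) is -128, while max_value (8, UNSIGNED) is 255
   and min_value (8, UNSIGNED) is 0.  */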
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));

  return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
        return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;

      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;

      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}
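/* The sign of the comparison only matters for the top block.  For
   example, at precision 8 the block 0xff reads as -1 signed but as 255
   unsigned, so cmps_large reports 0xff < 1 while cmpu_large reports
   0xff > 1.  */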
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        {
          val[len++] = 0;
          return len;
        }
      return canonize (val, len, precision);
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
/* Byte swap the integer represented by XVAL and XLEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                 unsigned int xlen, unsigned int precision)
{
  unsigned int s, len = BLOCKS_NEEDED (precision);

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  memset (val, 0, sizeof (unsigned HOST_WIDE_INT) * len);

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  return canonize (val, len, precision);
}
/* Bitreverse the integer represented by XVAL and LEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                      unsigned int len, unsigned int precision)
{
  unsigned int i, s;

  for (i = 0; i < len; i++)
    val[i] = 0;

  for (s = 0; s < precision; s++)
    {
      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
      if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
        {
          unsigned int d = (precision - 1) - s;
          block = d / HOST_BITS_PER_WIDE_INT;
          offset = d & (HOST_BITS_PER_WIDE_INT - 1);
          val[block] |= HOST_WIDE_INT_1U << offset;
        }
    }

  return canonize (val, len, precision);
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
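/* For example (at precision 16), mask (val, 4, false, 16) stores the
   value 0x000f and mask (val, 4, true, 16) stores 0xfff0, each in a
   single block.  */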
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = (HOST_WIDE_INT_1U << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  if (end >= prec)
    {
      /* ...111 */
      val[i++] = negate ? 0 : -1;
      return i;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
/*
 * Logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow
          = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          if ((HOST_WIDE_INT) (x << shift) < 0)
            {
              if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
                *overflow = wi::OVF_UNDERFLOW;
              else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
                *overflow = wi::OVF_OVERFLOW;
              else
                *overflow = wi::OVF_NONE;
            }
          else
            *overflow = wi::OVF_NONE;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
          else
            *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
        }
    }

  return canonize (val, len, prec);
}
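/* Note the carry recurrence above: with no incoming carry, the add
   wrapped iff X < O0; with an incoming carry, X == O0 also indicates a
   wrap (e.g. O0 = 5, O1 = ~(unsigned HOST_WIDE_INT) 0, carry in = 1
   gives X = 5 again, with a carry out).  */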
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
                     | ((unsigned HOST_WIDE_INT) input[i + 1]
                        << HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}
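/* As an illustration (assuming 64-bit HWIs and 32-bit half HWIs): the
   single block 0x1122334455667788 unpacks into the half-blocks
   { 0x55667788, 0x11223344 }, low half first, and wi_pack recombines
   them into the original block.  */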
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }
#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
            {
              val[2] = 0;
              return 3;
            }
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            /* Unsigned overflow can only be +OVERFLOW.  */
            *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
          if (high)
            val[0] = upper;
          return 1;
        }
    }
#endif
  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }
  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                /* FIXME: Signed overflow type is not implemented yet.  */
                *overflow = OVF_UNKNOWN;
            }
          else
            {
              if ((r >> prec) != 0)
                /* Unsigned overflow can only be +OVERFLOW.  */
                *overflow = OVF_OVERFLOW;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }
  /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
     WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
     result.  */

  unsigned HOST_HALF_WIDE_INT
    ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  const HOST_WIDE_INT mask
    = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
  unsigned HOST_HALF_WIDE_INT *u = ubuf;
  unsigned HOST_HALF_WIDE_INT *v = vbuf;
  unsigned HOST_HALF_WIDE_INT *r = rbuf;

  if (!high)
    prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
        = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
      u = buf;
      v = u + half_blocks_needed;
      r = v + half_blocks_needed;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, UNSIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, UNSIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  unsigned int shift;
  if ((high || needs_overflow) && (shift = prec % HOST_BITS_PER_WIDE_INT) != 0)
    {
      /* The high or needs_overflow code assumes that the high bits
         only appear from r[half_blocks_needed] up to
         r[half_blocks_needed * 2 - 1].  If prec is not a multiple
         of HOST_BITS_PER_WIDE_INT, shift the bits above prec up
         to make that code simple.  */
      if (shift == HOST_BITS_PER_HALF_WIDE_INT)
        memmove (&r[half_blocks_needed], &r[half_blocks_needed - 1],
                 sizeof (r[0]) * half_blocks_needed);
      else
        {
          unsigned int skip = shift < HOST_BITS_PER_HALF_WIDE_INT;
          if (!skip)
            shift -= HOST_BITS_PER_HALF_WIDE_INT;
          for (i = 2 * half_blocks_needed - 1; i >= half_blocks_needed; i--)
            r[i] = ((r[i - skip] << (-shift % HOST_BITS_PER_HALF_WIDE_INT))
                    | (r[i - skip - 1] >> shift));
        }
    }
  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }
  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[half_blocks_needed - 1
                  - ((-prec % HOST_BITS_PER_WIDE_INT)
                     >= HOST_BITS_PER_HALF_WIDE_INT)];
          top = SIGN_MASK (((unsigned HOST_WIDE_INT) top)
                           << (HOST_BITS_PER_WIDE_INT / 2
                               + (-prec % HOST_BITS_PER_HALF_WIDE_INT)));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          /* FIXME: Signed overflow type is not implemented yet.  */
          *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
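/* The main loop above is schoolbook multiplication in base 2^32.  The
   inner accumulation cannot overflow a HWI: with b = 2^32, the largest
   possible t is (b - 1) * (b - 1) + (b - 1) + (b - 1) = b * b - 1,
   which still fits in 64 bits.  */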
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          if ((HOST_WIDE_INT) (x << shift) < 0)
            {
              if (o0 > o1)
                *overflow = OVF_UNDERFLOW;
              else if (o0 < o1)
                *overflow = OVF_OVERFLOW;
              else
                *overflow = OVF_NONE;
            }
          else
            *overflow = OVF_NONE;
        }
      else
        {
          /* Put the MSBs of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
          else
            *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
        }
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.
   Return new value for N.  */
static int
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = HOST_WIDE_INT_1U << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return 1;
    }
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }

  /* If N > M, the main loop was skipped, quotient will be 0 and
     we can't copy more than M half-limbs into the remainder, as they
     aren't present in b_dividend (which has only M significant
     half-limbs).  */
  n = MIN (n, m);
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
  return n;
}
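/* Per Knuth (The Art of Computer Programming, vol. 2, section 4.3.1),
   after normalization the two-digit estimate QHAT above is never more
   than two too large; the test against b_divisor[n-2] catches almost
   all overestimates and the add-back step after the
   multiply-and-subtract corrects the rare remainder.  */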
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     wi::overflow_type *oflow)
{
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow)
        *oflow = OVF_OVERFLOW;
      if (quotient)
        {
          for (unsigned int i = 0; i < dividend_len; ++i)
            quotient[i] = dividend_val[i];
          return dividend_len;
        }
      return 0;
    }
  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
        {
          quotient[0] = o0 / o1;
          quotient_len = canonize_uhwi (quotient, dividend_prec);
        }
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = canonize_uhwi (remainder, dividend_prec);
        }
      return quotient_len;
    }
  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }
  unsigned HOST_HALF_WIDE_INT
    b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
                   / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
                    / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
                    / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
                  / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
  unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
  unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
  unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;

  if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
    dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
                         dividend_prec);
  if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
    divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
      || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
        = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
                      3 * dividend_blocks_needed + 1
                      + divisor_blocks_needed);
      b_quotient = buf;
      b_remainder = b_quotient + dividend_blocks_needed;
      b_dividend = b_remainder + dividend_blocks_needed;
      b_divisor = b_dividend + dividend_blocks_needed + 1;
      memset (b_quotient, 0,
              dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
    }
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, UNSIGNED);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, UNSIGNED);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  if (b_quotient == b_quotient_buf)
    memset (b_quotient_buf, 0, sizeof (b_quotient_buf));

  n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  len = MIN (xlen + skip + 1, len);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
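/* For example (assuming 64-bit HWIs), a shift of 70 splits into
   skip = 1 whole blocks and small_shift = 6 bits, and the expression
   -small_shift % HOST_BITS_PER_WIDE_INT evaluates to 58, the number of
   bits that carry into the next block.  */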
/* Right shift XVAL by SHIFT and store the result in VAL.  LEN is the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static void
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int shift, unsigned int len)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = blocks_needed;
  if (len > xlen && xval[xlen - 1] >= 0)
    len = xlen;

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero. */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = MIN (xlen, blocks_needed);

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
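/* For example, at precision 128 the value 1 (stored as a single
   block) has clz == 127 and ctz == 0; the count above the highest
   represented block is handled arithmetically rather than by
   materializing the implicit blocks.  */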
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
				  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  */
  return (val & mask) | (mask & lower_mask);
}
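
/* Tracing the first example above (VAL = 6 = 0b0110, MASK = 0xf1):
   EXTRA_BITS is 0b0110, LOWER_MASK is 0b0111, and the result is
   (VAL & MASK) | (MASK & LOWER_MASK) = 0b0000 | 0b0001 = 1.  */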
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
				  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  return (val | tmp) & -tmp;
}
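
/* Tracing the first example above (VAL = 6 = 0b0110, MASK = 0xf1):
   UPPER_MASK after the AND with MASK is 0b11110000, TMP is 0b11110000,
   and (VAL | TMP) & -TMP keeps only the lowest carry destination,
   giving 0b00010000 = 16.  */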
/* Compute the modular multiplicative inverse of A modulo B
   using the extended Euclidean algorithm.  Assumes A and B are coprime,
   and that A and B have the same precision.  */
wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption.  */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  /* Work at one bit more precision than A and B, so that intermediate
     negative values are representable.  */
  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  while (wi::gt_p (c, 1, UNSIGNED))
    {
      /* Division step: Q = C / D, with the remainder becoming the
	 new D and the old D becoming the new C.  */
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      /* Update the Bezout coefficients.  */
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }

  /* The coefficient can be negative; if so, wrap it back into [0, B).  */
  if (wi::lt_p (x1, 0, SIGNED))
    x1 += wide_int::from (b, p, UNSIGNED);
  return x1;
}
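
/* For example, wi::mod_inv (wi::shwi (3, 8), wi::shwi (10, 8))
   computes 7, since 3 * 7 = 21 = 2 * 10 + 1.  */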
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases.  Besides, you can always
   call the dump method directly.  */
DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const wide_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const widest_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */

template <class VALUE_TYPE>
static VALUE_TYPE from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */

template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int.  */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int.  */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}
/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_dec_buf_size (wi, sgn, &len))
    p = XALLOCAVEC (char, len);
  print_dec (wi, p, sgn);
  ASSERT_STREQ (expected, p);
}
/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_hex_buf_size (wi, &len))
    p = XALLOCAVEC (char, len);
  print_hex (wi, p);
  ASSERT_STREQ (expected, p);
}
/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_INL_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
		    wi::lshift (1, 129) + wi::lshift (1, 74)
		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}
/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax and
   overloaded operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}
/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* Three-way comparison.  */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}
/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}
/* Test overflow conditions.  */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
	int prec = precs[i];
	int offset = offsets[j];
	wi::overflow_type overflow;
	wide_int sum, diff;

	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED),
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED) - 1,
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, 1 - offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}
/* Test the round_{down,up}_for_mask functions.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
					 wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
					     wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
					   wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
					   wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
					 wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
					   wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
				       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
				       wi::shwi (0xabc, prec)));
}
/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
  ASSERT_EQ (wi::mask (128, false, 128),
	     wi::shifted_mask (0, 128, false, 128));
  ASSERT_EQ (wi::mask (128, true, 128),
	     wi::shifted_mask (0, 128, true, 128));
  ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
				from_int <widest_int> (-128), UNSIGNED),
	     false);
}

} // namespace selftest

#endif /* CHECKING_P */