/* Operations with very long integers.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif
#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
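
/* For example, with 64-bit HOST_WIDE_INTs, BLOCKS_NEEDED (65) and
   BLOCKS_NEEDED (128) are both 2, and BLOCKS_NEEDED (0) is defined to be 1
   so that even a zero-precision value occupies one block.  SIGN_MASK
   broadcasts the sign bit of X into a full block: -1 for negative values,
   0 otherwise.  */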
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
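
/* Illustration of the canonical form: with 64-bit blocks, a 128-bit 1 is
   stored as the single block {1}, not {1, 0}, because the dropped block is
   just the sign-extension of the one below it.  The unsigned quantity
   0xffffffffffffffff at 128 bits, however, needs the two blocks {-1, 0}:
   without the explicit zero block the stored value would read back
   as -1.  */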
/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}
/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      int extra
	= (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
	t[i] = v[i];
      for (int i = 0; i < extra; i++)
	t[len + i] = -1;
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
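
/* A note on the ones-complement trick above: for negative X, ~X equals
   -X - 1 and is always representable, even when X is the most negative
   value and -X itself is not.  Importing ~X as a non-negative number and
   then applying mpz_com, which computes -(OP + 1), reconstructs X
   exactly.  */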
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
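
/* E.g. zero-extending a 32-bit 0xffffffff (stored as the single block -1)
   to 128 bits must rewrite the top block as 0x00000000ffffffff, whereas
   sign-extending it can leave the block as -1.  Truncation simply drops
   blocks and relies on the final canonize call to tidy the new top
   block.  */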
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}
479 /* Return true if OP0 < OP1 using signed comparisons. */
481 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
482 unsigned int precision
,
483 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
485 HOST_WIDE_INT s0
, s1
;
486 unsigned HOST_WIDE_INT u0
, u1
;
487 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
488 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
489 int l
= MAX (op0len
- 1, op1len
- 1);
491 /* Only the top block is compared as signed. The rest are unsigned
493 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
494 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
503 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
504 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
516 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
519 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
520 unsigned int precision
,
521 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
523 HOST_WIDE_INT s0
, s1
;
524 unsigned HOST_WIDE_INT u0
, u1
;
525 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
526 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
527 int l
= MAX (op0len
- 1, op1len
- 1);
529 /* Only the top block is compared as signed. The rest are unsigned
531 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
532 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
541 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
542 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
554 /* Return true if OP0 < OP1 using unsigned comparisons. */
556 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
557 unsigned int precision
,
558 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
560 unsigned HOST_WIDE_INT x0
;
561 unsigned HOST_WIDE_INT x1
;
562 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
563 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
564 int l
= MAX (op0len
- 1, op1len
- 1);
568 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
569 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
580 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
581 unsigned compares. */
583 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
584 unsigned int precision
,
585 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
587 unsigned HOST_WIDE_INT x0
;
588 unsigned HOST_WIDE_INT x1
;
589 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
590 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
591 int l
= MAX (op0len
- 1, op1len
- 1);
595 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
596 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
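
/* All four *_large comparison routines walk from the most significant
   block downward and fetch blocks through selt, so operands of different
   lengths, or even different precisions, are compared as if both had been
   extended to the wider block count first.  */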
/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	{
	  val[len++] = 0;
	  return len;
	}
      return canonize (val, len, precision);
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
/* Byte-swap the integer.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      /* Fetch the byte at offset S in the source.  */
      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      /* Store it at the mirrored offset D in the result.  */
      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
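
/* For example, wi::mask (val, 8, false, 32) fills VAL with a single block
   holding 0xff (the low eight bits set), while the negated form instead
   sets bits 8 and above within the 32-bit precision.  */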
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
/*
 * logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
923 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
925 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
926 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
927 unsigned int op1len
, unsigned int prec
)
932 bool need_canon
= true;
934 unsigned int len
= MAX (op0len
, op1len
);
937 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
955 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
971 val
[l0
] = op0
[l0
] & ~op1
[l0
];
976 len
= canonize (val
, len
, prec
);
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
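
/* The binary logical routines above all share one idea: the shorter
   operand's missing high blocks are implicitly all-zero or all-one (its
   sign mask), so the high part of the result can often be copied from the
   longer operand or dropped outright, and canonization is skipped when the
   copied top is already canonical.  */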
/*
 * Addition and subtraction.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow
	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_UNDERFLOW;
	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_OVERFLOW;
	      else
		*overflow = wi::OVF_NONE;
	    }
	  else
	    *overflow = wi::OVF_NONE;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  else
	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
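
/* The carry recurrence in the loop above exploits unsigned wraparound:
   with no incoming carry, X = O0 + O1 wraps iff X < O0; with an incoming
   carry of 1, X = O0 + O1 + 1 wraps iff X <= O0.  */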
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
		     | ((unsigned HOST_WIDE_INT) input[i + 1]
			<< HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}
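
/* Multiplication and division work on HOST_HALF_WIDE_INT digits so that
   the product of two digits fits in a full HOST_WIDE_INT without losing
   the carry: two 32-bit halves multiply into at most 64 bits.  wi_unpack
   and wi_pack convert between the two representations.  */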
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    /* Unsigned overflow can only be +OVERFLOW.  */
	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		/* FIXME: Signed overflow type is not implemented yet.  */
		*overflow = OVF_UNKNOWN;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		/* Unsigned overflow can only be +OVERFLOW.  */
		*overflow = OVF_OVERFLOW;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  /* FIXME: Signed overflow type is not implemented yet.  */
	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
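
/* The signed adjustment in mul_internal follows from reading an N-bit
   operand as unsigned: a negative operand is over-counted by exactly 2^N,
   which contributes the other operand, shifted up by N, to the unsigned
   product.  Subtracting V (resp. U) from the upper half of R when OP1
   (resp. OP2) is negative cancels that contribution.  */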
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > o1)
		*overflow = OVF_UNDERFLOW;
	      else if (o0 < o1)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
	  else
	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of B_DIVIDEND,
   however there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  /* The estimated digit was one too big: add the divisor back
	     in and decrement the quotient digit.  */
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
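
/* divmod_internal_2 is essentially Knuth's Algorithm D on half-word
   digits: the normalization shift S forces the divisor's top digit to
   have its high bit set, which guarantees that QHAT, estimated from the
   top two dividend digits alone, is at most 2 too large; the
   multiply-and-subtract step detects the rare remaining over-estimate and
   adds the divisor back.  */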
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     wi::overflow_type *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow)
	*oflow = OVF_OVERFLOW;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = OVF_NONE;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
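
/* The sign handling above matches C's truncating division: operands are
   divided as magnitudes and the signs patched afterwards by subtracting
   from zero, so for example -7 / 2 gives quotient -3 and remainder -1.  */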
/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
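
/* Note the carry computation: shifting right by
   HOST_BITS_PER_WIDE_INT - SMALL_SHIFT would be undefined when SMALL_SHIFT
   is 0, so the code writes -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT, which is
   the same amount on this path (where SMALL_SHIFT is known nonzero) and
   never reaches the undefined full-width case.  */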
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero. */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented
     block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
				  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  */
  return (val & mask) | (mask & lower_mask);
}
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
				  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  return (val | tmp) & -tmp;
}
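
/* Tracing round_up_for_mask (6, 0xf1): EXTRA_BITS = 6 & ~0xf1 = 6, so
   UPPER_MASK = (ones above bit 2) & 0xf1 = 0xf0.  Then TMP = ~6 & 0xf0
   = 0xf0, and (6 | 0xf0) & -0xf0 = 0xf6 & ...f10 = 0x10, giving the
   documented result 16.  */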
/* Compute the modular multiplicative inverse of A modulo B
   using extended Euclid's algorithm.  Assumes A and B are coprime,
   and that A and B have the same precision.  */
wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption.  */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  while (wi::gt_p (c, 1, UNSIGNED))
    {
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }

  if (wi::lt_p (x1, 0, SIGNED))
    x1 += d;

  return x1;
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases.  Besides, you can always
   call the dump method directly.  */
2296 debug (const wide_int
&ref
)
2302 debug (const wide_int
*ptr
)
2307 fprintf (stderr
, "<nil>\n");
2311 debug (const widest_int
&ref
)
2317 debug (const widest_int
*ptr
)
2322 fprintf (stderr
, "<nil>\n");
#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */

template <class VALUE_TYPE>
static VALUE_TYPE from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */

template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int.  */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int.  */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}
/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];

  print_dec (wi, buf, sgn);
  ASSERT_STREQ (expected, buf);
}

/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];

  print_hex (wi, buf);
  ASSERT_STREQ (expected, buf);
}
/* Test cases.  */

/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
		    wi::lshift (1, 129) + wi::lshift (1, 74)
		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}
/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax, and
   overloaded-operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}
/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}
/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}
/* Test overflow conditions.  */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
	int prec = precs[i];
	int offset = offsets[j];
	wi::overflow_type overflow;
	wide_int sum, diff;

	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED),
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED) - 1,
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, 1 - offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}
/* Test the round_{down,up}_for_mask functions.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
					 wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
					     wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
					   wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
					   wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
					 wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
					   wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
				       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
				       wi::shwi (0xabc, prec)));
}
/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
}

} // namespace selftest

#endif /* CHECKING_P */