1 /* Operations with very long integers.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 /* Do not include longlong.h when compiler is clang-based. See PR61146. */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
typedef unsigned int UDWtype __attribute__ ((mode (TI)));

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
60 /* Quantities to deal with values that hold half of a wide int. Used
61 in multiply and divide. */
62 #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
64 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 #define BLOCKS_NEEDED(PREC) \
66 (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
67 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}
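/* Illustrative example of the helper above: reading past LEN reproduces
   the implicit sign extension of the compressed representation.

     const HOST_WIDE_INT v[2] = { 0x1234, -1 };
     safe_uhwi (v, 2, 0);   // 0x1234: within LEN, returned as stored.
     safe_uhwi (v, 2, 5);   // HOST_WIDE_INT_M1: past LEN, top block < 0.  */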
78 /* Convert the integer in VAL to canonical form, returning its new length.
79 LEN is the number of blocks currently in VAL and PRECISION is the number
80 of bits in the integer it represents.
82 This function only changes the representation, not the value. */
84 canonize (HOST_WIDE_INT
*val
, unsigned int len
, unsigned int precision
)
86 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
90 if (len
> blocks_needed
)
97 if (len
* HOST_BITS_PER_WIDE_INT
> precision
)
98 val
[len
- 1] = top
= sext_hwi (top
, precision
% HOST_BITS_PER_WIDE_INT
);
99 if (top
!= 0 && top
!= (HOST_WIDE_INT
)-1)
102 /* At this point we know that the top is either 0 or -1. Find the
103 first block that is not a copy of this. */
104 for (i
= len
- 2; i
>= 0; i
--)
106 HOST_WIDE_INT x
= val
[i
];
109 if (SIGN_MASK (x
) == top
)
/* We need an extra block because the top bit of block I does
   not match the extension.  */
118 /* The number is 0 or -1. */
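/* Illustrative example of canonization: only blocks that are redundant
   sign extensions are dropped.  For a 128-bit precision:

     HOST_WIDE_INT v[2] = { 5, 0 };
     canonize (v, 2, 128);   // 1: the zero block is implied by v[0] >= 0.

     HOST_WIDE_INT w[2] = { -5, -1 };
     canonize (w, 2, 128);   // 1: the -1 block is implied by w[0] < 0.

     HOST_WIDE_INT u[2] = { -5, 0 };
     canonize (u, 2, 128);   // 2: here the zero block is significant.  */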
122 /* VAL[0] is the unsigned result of an operation. Canonize it by adding
123 another 0 block if needed, and return number of blocks needed. */
125 static inline unsigned int
126 canonize_uhwi (HOST_WIDE_INT
*val
, unsigned int precision
)
128 if (val
[0] < 0 && precision
> HOST_BITS_PER_WIDE_INT
)
137 * Conversion routines in and out of wide_int.
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
152 /* Construct a wide int from a buffer of length LEN. BUFFER will be
153 read according to byte endianness and word endianness of the target.
154 Only the lower BUFFER_LEN bytes of the result are set; the remaining
155 high bytes are cleared. */
157 wi::from_buffer (const unsigned char *buffer
, unsigned int buffer_len
)
159 unsigned int precision
= buffer_len
* BITS_PER_UNIT
;
160 wide_int result
= wide_int::create (precision
);
161 unsigned int words
= buffer_len
/ UNITS_PER_WORD
;
/* We have to clear all the bits ourselves, as we merely or in values
   below.  */
165 unsigned int len
= BLOCKS_NEEDED (precision
);
166 HOST_WIDE_INT
*val
= result
.write_val ();
167 for (unsigned int i
= 0; i
< len
; ++i
)
170 for (unsigned int byte
= 0; byte
< buffer_len
; byte
++)
174 unsigned int bitpos
= byte
* BITS_PER_UNIT
;
175 unsigned HOST_WIDE_INT value
;
177 if (buffer_len
> UNITS_PER_WORD
)
179 unsigned int word
= byte
/ UNITS_PER_WORD
;
181 if (WORDS_BIG_ENDIAN
)
182 word
= (words
- 1) - word
;
184 offset
= word
* UNITS_PER_WORD
;
186 if (BYTES_BIG_ENDIAN
)
187 offset
+= (UNITS_PER_WORD
- 1) - (byte
% UNITS_PER_WORD
);
189 offset
+= byte
% UNITS_PER_WORD
;
192 offset
= BYTES_BIG_ENDIAN
? (buffer_len
- 1) - byte
: byte
;
194 value
= (unsigned HOST_WIDE_INT
) buffer
[offset
];
196 index
= bitpos
/ HOST_BITS_PER_WIDE_INT
;
197 val
[index
] |= value
<< (bitpos
% HOST_BITS_PER_WIDE_INT
);
200 result
.set_len (canonize (val
, len
, precision
));
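/* Illustrative example of wi::from_buffer; the result has exactly
   BUFFER_LEN * BITS_PER_UNIT bits, so no sign extension occurs.  Assuming
   a little-endian target (byte and word order are target-dependent):

     const unsigned char buf[2] = { 0xff, 0x7f };
     wide_int x = wi::from_buffer (buf, 2);   // 16-bit value 0x7fff.  */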
205 /* Sets RESULT from X, the sign is taken according to SGN. */
207 wi::to_mpz (const wide_int_ref
&x
, mpz_t result
, signop sgn
)
209 int len
= x
.get_len ();
210 const HOST_WIDE_INT
*v
= x
.get_val ();
211 int excess
= len
* HOST_BITS_PER_WIDE_INT
- x
.get_precision ();
213 if (wi::neg_p (x
, sgn
))
/* We use ones complement to avoid the -0x80..0 edge case that
   negation won't work on.  */
217 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
218 for (int i
= 0; i
< len
; i
++)
221 t
[len
- 1] = (unsigned HOST_WIDE_INT
) t
[len
- 1] << excess
>> excess
;
222 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
223 mpz_com (result
, result
);
227 HOST_WIDE_INT
*t
= XALLOCAVEC (HOST_WIDE_INT
, len
);
228 for (int i
= 0; i
< len
- 1; i
++)
230 t
[len
- 1] = (unsigned HOST_WIDE_INT
) v
[len
- 1] << excess
>> excess
;
231 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
234 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, v
);
237 /* Returns X converted to TYPE. If WRAP is true, then out-of-range
238 values of VAL will be wrapped; otherwise, they will be set to the
239 appropriate minimum or maximum TYPE bound. */
241 wi::from_mpz (const_tree type
, mpz_t x
, bool wrap
)
244 unsigned int prec
= TYPE_PRECISION (type
);
245 wide_int res
= wide_int::create (prec
);
253 get_type_static_bounds (type
, min
, max
);
255 if (mpz_cmp (x
, min
) < 0)
257 else if (mpz_cmp (x
, max
) > 0)
264 /* Determine the number of unsigned HOST_WIDE_INTs that are required
265 for representing the absolute value. The code to calculate count is
266 extracted from the GMP manual, section "Integer Import and Export":
267 http://gmplib.org/manual/Integer-Import-and-Export.html */
268 numb
= CHAR_BIT
* sizeof (HOST_WIDE_INT
);
269 count
= (mpz_sizeinbase (x
, 2) + numb
- 1) / numb
;
270 HOST_WIDE_INT
*val
= res
.write_val ();
271 /* Read the absolute value.
273 Write directly to the wide_int storage if possible, otherwise leave
274 GMP to allocate the memory for us. It might be slightly more efficient
275 to use mpz_tdiv_r_2exp for the latter case, but the situation is
276 pathological and it seems safer to operate on the original mpz value
278 void *valres
= mpz_export (count
<= WIDE_INT_MAX_ELTS
? val
: 0,
279 &count
, -1, sizeof (HOST_WIDE_INT
), 0, 0, x
);
285 count
= MIN (count
, BLOCKS_NEEDED (prec
));
288 memcpy (val
, valres
, count
* sizeof (HOST_WIDE_INT
));
291 /* Zero-extend the absolute value to PREC bits. */
292 if (count
< BLOCKS_NEEDED (prec
) && val
[count
- 1] < 0)
295 count
= canonize (val
, count
, prec
);
305 * Largest and smallest values in a mode.
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
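/* Illustrative example of the two routines above for an 8-bit precision:

     wi::max_value (8, UNSIGNED);   // 0xff (255)
     wi::max_value (8, SIGNED);     // 0x7f (127)
     wi::min_value (8, UNSIGNED);   // 0
     wi::min_value (8, SIGNED);     // 0x80 (-128)  */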
345 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
346 signedness SGN, to an integer that has PRECISION bits. Store the blocks
347 in VAL and return the number of blocks used.
349 This function can handle both extension (PRECISION > XPRECISION)
350 and truncation (PRECISION < XPRECISION). */
352 wi::force_to_size (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
353 unsigned int xlen
, unsigned int xprecision
,
354 unsigned int precision
, signop sgn
)
356 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
357 unsigned int len
= blocks_needed
< xlen
? blocks_needed
: xlen
;
358 for (unsigned i
= 0; i
< len
; i
++)
361 if (precision
> xprecision
)
363 unsigned int small_xprecision
= xprecision
% HOST_BITS_PER_WIDE_INT
;
368 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
369 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
370 else if (val
[len
- 1] < 0)
372 while (len
< BLOCKS_NEEDED (xprecision
))
374 if (small_xprecision
)
375 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
382 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
383 val
[len
- 1] = sext_hwi (val
[len
- 1], small_xprecision
);
386 len
= canonize (val
, len
, precision
);
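/* Illustrative example: when widening, SGN selects how the value of the
   narrower XPRECISION is extended.  Converting the 8-bit pattern 0xff
   to 16 bits via wide_int::from (which uses force_to_size):

     wide_int b = wi::shwi (-1, 8);                   // 8-bit 0xff
     wide_int s = wide_int::from (b, 16, SIGNED);     // 0xffff (-1)
     wide_int u = wide_int::from (b, 16, UNSIGNED);   // 0x00ff (255)  */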
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
394 static inline HOST_WIDE_INT
395 selt (const HOST_WIDE_INT
*a
, unsigned int len
,
396 unsigned int blocks_needed
, unsigned int small_prec
,
397 unsigned int index
, signop sgn
)
402 else if (index
< blocks_needed
|| sgn
== SIGNED
)
403 /* Signed or within the precision. */
404 val
= SIGN_MASK (a
[len
- 1]);
406 /* Unsigned extension beyond the precision. */
409 if (small_prec
&& index
== blocks_needed
- 1)
410 return (sgn
== SIGNED
411 ? sext_hwi (val
, small_prec
)
412 : zext_hwi (val
, small_prec
));
417 /* Find the highest bit represented in a wide int. This will in
418 general have the same value as the sign bit. */
419 static inline HOST_WIDE_INT
420 top_bit_of (const HOST_WIDE_INT
*a
, unsigned int len
, unsigned int prec
)
422 int excess
= len
* HOST_BITS_PER_WIDE_INT
- prec
;
423 unsigned HOST_WIDE_INT val
= a
[len
- 1];
426 return val
>> (HOST_BITS_PER_WIDE_INT
- 1);
430 * Comparisons, note that only equality is an operator. The other
431 * comparisons cannot be operators since they are inherently signed or
432 * unsigned and C++ has no such operators.
435 /* Return true if OP0 == OP1. */
437 wi::eq_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
438 const HOST_WIDE_INT
*op1
, unsigned int op1len
,
442 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
444 if (op0len
!= op1len
)
447 if (op0len
== BLOCKS_NEEDED (prec
) && small_prec
)
449 /* It does not matter if we zext or sext here, we just have to
450 do both the same way. */
451 if (zext_hwi (op0
[l0
], small_prec
) != zext_hwi (op1
[l0
], small_prec
))
457 if (op0
[l0
] != op1
[l0
])
465 /* Return true if OP0 < OP1 using signed comparisons. */
467 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
468 unsigned int precision
,
469 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
471 HOST_WIDE_INT s0
, s1
;
472 unsigned HOST_WIDE_INT u0
, u1
;
473 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
474 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
475 int l
= MAX (op0len
- 1, op1len
- 1);
  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
479 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
480 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
489 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
490 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
502 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
505 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
506 unsigned int precision
,
507 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
509 HOST_WIDE_INT s0
, s1
;
510 unsigned HOST_WIDE_INT u0
, u1
;
511 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
512 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
513 int l
= MAX (op0len
- 1, op1len
- 1);
  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
517 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
518 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
527 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
528 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
540 /* Return true if OP0 < OP1 using unsigned comparisons. */
542 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
543 unsigned int precision
,
544 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
546 unsigned HOST_WIDE_INT x0
;
547 unsigned HOST_WIDE_INT x1
;
548 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
549 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
550 int l
= MAX (op0len
- 1, op1len
- 1);
554 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
555 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
566 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
567 unsigned compares. */
569 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
570 unsigned int precision
,
571 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
573 unsigned HOST_WIDE_INT x0
;
574 unsigned HOST_WIDE_INT x1
;
575 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
576 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
577 int l
= MAX (op0len
- 1, op1len
- 1);
581 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
582 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
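/* Illustrative example: the signed and unsigned comparisons can disagree
   because the same bit pattern is read differently.  With 16-bit operands:

     wide_int m1 = wi::shwi (-1, 16);   // pattern 0xffff
     wide_int one = wi::shwi (1, 16);
     wi::lts_p (m1, one);   // true:  -1 < 1 when signed
     wi::ltu_p (m1, one);   // false: 0xffff > 1 when unsigned
     wi::cmps (m1, one);    // -1
     wi::cmpu (m1, one);    // 1  */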
597 /* Sign-extend the number represented by XVAL and XLEN into VAL,
598 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
599 and VAL have PRECISION bits. */
601 wi::sext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
602 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
604 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
605 /* Extending beyond the precision is a no-op. If we have only stored
606 OFFSET bits or fewer, the rest are already signs. */
607 if (offset
>= precision
|| len
>= xlen
)
609 for (unsigned i
= 0; i
< xlen
; ++i
)
613 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
614 for (unsigned int i
= 0; i
< len
; i
++)
618 val
[len
] = sext_hwi (xval
[len
], suboffset
);
621 return canonize (val
, len
, precision
);
624 /* Zero-extend the number represented by XVAL and XLEN into VAL,
625 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
626 and VAL have PRECISION bits. */
628 wi::zext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
629 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
631 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
632 /* Extending beyond the precision is a no-op. If we have only stored
633 OFFSET bits or fewer, and the upper stored bit is zero, then there
635 if (offset
>= precision
|| (len
>= xlen
&& xval
[xlen
- 1] >= 0))
637 for (unsigned i
= 0; i
< xlen
; ++i
)
641 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
642 for (unsigned int i
= 0; i
< len
; i
++)
643 val
[i
] = i
< xlen
? xval
[i
] : -1;
645 val
[len
] = zext_hwi (len
< xlen
? xval
[len
] : -1, suboffset
);
648 return canonize (val
, len
+ 1, precision
);
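/* Illustrative example: OFFSET is the bit position the extension starts
   from, not a shift amount.  With a 16-bit value whose low byte is 0x80:

     wide_int x = wi::shwi (0x80, 16);
     wi::zext (x, 8);   // 0x0080: bits 8 and above cleared
     wi::sext (x, 8);   // 0xff80: bit 7 copied upwards, i.e. -128  */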
652 * Masking, inserting, shifting, rotating.
655 /* Insert WIDTH bits from Y into X starting at START. */
657 wi::insert (const wide_int
&x
, const wide_int
&y
, unsigned int start
,
664 unsigned int precision
= x
.get_precision ();
665 if (start
>= precision
)
668 gcc_checking_assert (precision
>= width
);
670 if (start
+ width
>= precision
)
671 width
= precision
- start
;
673 mask
= wi::shifted_mask (start
, width
, false, precision
);
674 tmp
= wi::lshift (wide_int::from (y
, precision
, UNSIGNED
), start
);
677 tmp
= wi::bit_and_not (x
, mask
);
678 result
= result
| tmp
;
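/* Illustrative example: the low WIDTH bits of Y land at bit START of X
   and everything else in X is preserved.  With 16-bit values:

     wide_int x = wi::shwi (0x1001, 16);
     wide_int y = wi::shwi (0x5, 16);
     wi::insert (x, y, 4, 3);   // 0x1051: bits 4..6 replaced by 0b101  */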
683 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
684 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
687 wi::set_bit_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
688 unsigned int xlen
, unsigned int precision
, unsigned int bit
)
690 unsigned int block
= bit
/ HOST_BITS_PER_WIDE_INT
;
691 unsigned int subbit
= bit
% HOST_BITS_PER_WIDE_INT
;
693 if (block
+ 1 >= xlen
)
695 /* The operation either affects the last current block or needs
697 unsigned int len
= block
+ 1;
698 for (unsigned int i
= 0; i
< len
; i
++)
699 val
[i
] = safe_uhwi (xval
, xlen
, i
);
700 val
[block
] |= HOST_WIDE_INT_1U
<< subbit
;
702 /* If the bit we just set is at the msb of the block, make sure
703 that any higher bits are zeros. */
704 if (bit
+ 1 < precision
&& subbit
== HOST_BITS_PER_WIDE_INT
- 1)
710 for (unsigned int i
= 0; i
< xlen
; i
++)
712 val
[block
] |= HOST_WIDE_INT_1U
<< subbit
;
713 return canonize (val
, xlen
, precision
);
719 wide_int_storage::bswap () const
721 wide_int result
= wide_int::create (precision
);
723 unsigned int len
= BLOCKS_NEEDED (precision
);
724 unsigned int xlen
= get_len ();
725 const HOST_WIDE_INT
*xval
= get_val ();
726 HOST_WIDE_INT
*val
= result
.write_val ();
  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);
732 for (i
= 0; i
< len
; i
++)
735 /* Only swap the bytes that are not the padding. */
736 for (s
= 0; s
< precision
; s
+= 8)
738 unsigned int d
= precision
- s
- 8;
739 unsigned HOST_WIDE_INT byte
;
741 unsigned int block
= s
/ HOST_BITS_PER_WIDE_INT
;
742 unsigned int offset
= s
& (HOST_BITS_PER_WIDE_INT
- 1);
744 byte
= (safe_uhwi (xval
, xlen
, block
) >> offset
) & 0xff;
746 block
= d
/ HOST_BITS_PER_WIDE_INT
;
747 offset
= d
& (HOST_BITS_PER_WIDE_INT
- 1);
749 val
[block
] |= byte
<< offset
;
752 result
.set_len (canonize (val
, len
, precision
));
756 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
757 above that up to PREC are zeros. The result is inverted if NEGATE
758 is true. Return the number of blocks in VAL. */
760 wi::mask (HOST_WIDE_INT
*val
, unsigned int width
, bool negate
,
765 val
[0] = negate
? 0 : -1;
770 val
[0] = negate
? -1 : 0;
775 while (i
< width
/ HOST_BITS_PER_WIDE_INT
)
776 val
[i
++] = negate
? 0 : -1;
778 unsigned int shift
= width
& (HOST_BITS_PER_WIDE_INT
- 1);
781 HOST_WIDE_INT last
= (HOST_WIDE_INT_1U
<< shift
) - 1;
782 val
[i
++] = negate
? ~last
: last
;
785 val
[i
++] = negate
? -1 : 0;
790 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
791 bits are ones, and the bits above that up to PREC are zeros. The result
792 is inverted if NEGATE is true. Return the number of blocks in VAL. */
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
797 if (start
>= prec
|| width
== 0)
799 val
[0] = negate
? -1 : 0;
803 if (width
> prec
- start
)
804 width
= prec
- start
;
805 unsigned int end
= start
+ width
;
808 while (i
< start
/ HOST_BITS_PER_WIDE_INT
)
809 val
[i
++] = negate
? -1 : 0;
811 unsigned int shift
= start
& (HOST_BITS_PER_WIDE_INT
- 1);
814 HOST_WIDE_INT block
= (HOST_WIDE_INT_1U
<< shift
) - 1;
816 if (shift
< HOST_BITS_PER_WIDE_INT
)
819 block
= (HOST_WIDE_INT_1U
<< shift
) - block
- 1;
820 val
[i
++] = negate
? ~block
: block
;
825 val
[i
++] = negate
? block
: ~block
;
828 while (i
< end
/ HOST_BITS_PER_WIDE_INT
)
830 val
[i
++] = negate
? 0 : -1;
832 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
836 HOST_WIDE_INT block
= (HOST_WIDE_INT_1U
<< shift
) - 1;
837 val
[i
++] = negate
? ~block
: block
;
840 val
[i
++] = negate
? -1 : 0;
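/* Illustrative examples of the two mask builders for an 8-bit precision:

     wi::mask (4, false, 8);             // 0x0f: four low ones
     wi::mask (4, true, 8);              // 0xf0: the same mask, inverted
     wi::shifted_mask (2, 3, false, 8);  // 0x1c: ones in bits 2..4
     wi::shifted_mask (2, 3, true, 8);   // 0xe3: its inverse  */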
846 * logical operations.
849 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
851 wi::and_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
852 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
853 unsigned int op1len
, unsigned int prec
)
857 bool need_canon
= true;
859 unsigned int len
= MAX (op0len
, op1len
);
862 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
880 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
896 val
[l0
] = op0
[l0
] & op1
[l0
];
901 len
= canonize (val
, len
, prec
);
906 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
908 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
909 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
910 unsigned int op1len
, unsigned int prec
)
915 bool need_canon
= true;
917 unsigned int len
= MAX (op0len
, op1len
);
920 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
938 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
954 val
[l0
] = op0
[l0
] & ~op1
[l0
];
959 len
= canonize (val
, len
, prec
);
964 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
966 wi::or_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
967 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
968 unsigned int op1len
, unsigned int prec
)
973 bool need_canon
= true;
975 unsigned int len
= MAX (op0len
, op1len
);
978 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
996 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1012 val
[l0
] = op0
[l0
] | op1
[l0
];
1017 len
= canonize (val
, len
, prec
);
1022 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1024 wi::or_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1025 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1026 unsigned int op1len
, unsigned int prec
)
1029 int l0
= op0len
- 1;
1030 int l1
= op1len
- 1;
1031 bool need_canon
= true;
1033 unsigned int len
= MAX (op0len
, op1len
);
1036 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1054 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1070 val
[l0
] = op0
[l0
] | ~op1
[l0
];
1075 len
= canonize (val
, len
, prec
);
1080 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1082 wi::xor_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1083 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1084 unsigned int op1len
, unsigned int prec
)
1087 int l0
= op0len
- 1;
1088 int l1
= op1len
- 1;
1090 unsigned int len
= MAX (op0len
, op1len
);
1093 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1096 val
[l0
] = op0
[l0
] ^ op1mask
;
1103 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1106 val
[l1
] = op0mask
^ op1
[l1
];
1113 val
[l0
] = op0
[l0
] ^ op1
[l0
];
1117 return canonize (val
, len
, prec
);
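/* Illustrative example: operands of different lengths behave as if the
   shorter one were sign-extended to the full precision, so with a
   128-bit precision:

     wide_int m1 = wi::shwi (-1, 128);   // one block, implicitly all ones
     wide_int x = wi::lshift (wi::shwi (1, 128), 100);
     wi::bit_and (m1, x) == x;   // true
     wi::bit_xor (m1, x);        // ~x  */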
1124 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1125 whether the result overflows when OP0 and OP1 are treated as having
1126 signedness SGN. Return the number of blocks in VAL. */
1128 wi::add_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1129 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1130 unsigned int op1len
, unsigned int prec
,
1131 signop sgn
, wi::overflow_type
*overflow
)
1133 unsigned HOST_WIDE_INT o0
= 0;
1134 unsigned HOST_WIDE_INT o1
= 0;
1135 unsigned HOST_WIDE_INT x
= 0;
1136 unsigned HOST_WIDE_INT carry
= 0;
1137 unsigned HOST_WIDE_INT old_carry
= 0;
1138 unsigned HOST_WIDE_INT mask0
, mask1
;
1141 unsigned int len
= MAX (op0len
, op1len
);
1142 mask0
= -top_bit_of (op0
, op0len
, prec
);
1143 mask1
= -top_bit_of (op1
, op1len
, prec
);
1144 /* Add all of the explicitly defined elements. */
1146 for (i
= 0; i
< len
; i
++)
1148 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
) op0
[i
] : mask0
;
1149 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
) op1
[i
] : mask1
;
1150 x
= o0
+ o1
+ carry
;
1153 carry
= carry
== 0 ? x
< o0
: x
<= o0
;
1156 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1158 val
[len
] = mask0
+ mask1
+ carry
;
1162 = (sgn
== UNSIGNED
&& carry
) ? wi::OVF_OVERFLOW
: wi::OVF_NONE
;
1166 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1169 unsigned HOST_WIDE_INT x
= (val
[len
- 1] ^ o0
) & (val
[len
- 1] ^ o1
);
1170 if ((HOST_WIDE_INT
) (x
<< shift
) < 0)
1172 if (o0
> (unsigned HOST_WIDE_INT
) val
[len
- 1])
1173 *overflow
= wi::OVF_UNDERFLOW
;
1174 else if (o0
< (unsigned HOST_WIDE_INT
) val
[len
- 1])
1175 *overflow
= wi::OVF_OVERFLOW
;
1177 *overflow
= wi::OVF_NONE
;
1180 *overflow
= wi::OVF_NONE
;
  /* Put the MSB of X and O0 in the top of the HWI.  */
1188 *overflow
= (x
<= o0
) ? wi::OVF_OVERFLOW
: wi::OVF_NONE
;
1190 *overflow
= (x
< o0
) ? wi::OVF_OVERFLOW
: wi::OVF_NONE
;
1194 return canonize (val
, len
, prec
);
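/* Illustrative example: the optional OVERFLOW output reports wrapping for
   the requested signedness.  With an 8-bit precision:

     wi::overflow_type ovf;
     wide_int s = wi::add (wi::uhwi (255, 8), wi::uhwi (1, 8), UNSIGNED, &ovf);
     // s is 0 and ovf is wi::OVF_OVERFLOW.

     s = wi::add (wi::shwi (100, 8), wi::shwi (28, 8), SIGNED, &ovf);
     // s is -128 (the 8-bit wrap of 128) and ovf is wi::OVF_OVERFLOW.  */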
1197 /* Subroutines of the multiplication and division operations. Unpack
1198 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1199 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1200 uncompressing the top bit of INPUT[IN_LEN - 1]. */
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
1208 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
1209 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1214 mask
= -top_bit_of ((const HOST_WIDE_INT
*) input
, in_len
, prec
);
1215 mask
&= HALF_INT_MASK
;
1220 for (i
= 0; i
< blocks_needed
- 1; i
++)
1222 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1224 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1227 HOST_WIDE_INT x
= safe_uhwi (input
, in_len
, i
);
1231 x
= sext_hwi (x
, small_prec
);
1233 x
= zext_hwi (x
, small_prec
);
1236 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1238 /* Smear the sign bit. */
1243 /* The inverse of wi_unpack. IN_LEN is the number of input
1244 blocks and PRECISION is the precision of the result. Return the
1245 number of blocks in the canonicalized result. */
1247 wi_pack (HOST_WIDE_INT
*result
,
1248 const unsigned HOST_HALF_WIDE_INT
*input
,
1249 unsigned int in_len
, unsigned int precision
)
1253 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
1255 while (i
+ 1 < in_len
)
1257 result
[j
++] = ((unsigned HOST_WIDE_INT
) input
[i
]
1258 | ((unsigned HOST_WIDE_INT
) input
[i
+ 1]
1259 << HOST_BITS_PER_HALF_WIDE_INT
));
1263 /* Handle the case where in_len is odd. For this we zero extend. */
1265 result
[j
++] = (unsigned HOST_WIDE_INT
) input
[i
];
1266 else if (j
< blocks_needed
)
1268 return canonize (result
, j
, precision
);
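/* Illustrative example: with 64-bit HOST_WIDE_INTs and 32-bit halves,
   wi_unpack splits each input block into a low and a high half-word,

     input[0]  = 0x123456789abcdef0
     result[0] = 0x9abcdef0,  result[1] = 0x12345678

   and wi_pack reassembles consecutive pairs of halves in the same order.  */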
1271 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1274 If HIGH is not set, throw away the upper half after the check is
1275 made to see if it overflows. Unfortunately there is no better way
1276 to check for overflow than to do this. If OVERFLOW is nonnull,
1277 record in *OVERFLOW whether the result overflowed. SGN controls
1278 the signedness and is used to check overflow or if HIGH is set.
1280 NOTE: Overflow type for signed overflow is not yet implemented. */
1282 wi::mul_internal (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op1val
,
1283 unsigned int op1len
, const HOST_WIDE_INT
*op2val
,
1284 unsigned int op2len
, unsigned int prec
, signop sgn
,
1285 wi::overflow_type
*overflow
, bool high
)
1287 unsigned HOST_WIDE_INT o0
, o1
, k
, t
;
1290 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1291 unsigned int half_blocks_needed
= blocks_needed
* 2;
1292 /* The sizes here are scaled to support a 2x largest mode by 2x
1293 largest mode yielding a 4x largest mode result. This is what is
1296 unsigned HOST_HALF_WIDE_INT
1297 u
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1298 unsigned HOST_HALF_WIDE_INT
1299 v
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
1302 unsigned HOST_HALF_WIDE_INT
1303 r
[2 * 4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1304 HOST_WIDE_INT mask
= ((HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
) - 1;
1306 /* If the top level routine did not really pass in an overflow, then
1307 just make sure that we never attempt to set it. */
1308 bool needs_overflow
= (overflow
!= 0);
1310 *overflow
= wi::OVF_NONE
;
1312 wide_int_ref op1
= wi::storage_ref (op1val
, op1len
, prec
);
1313 wide_int_ref op2
= wi::storage_ref (op2val
, op2len
, prec
);
1315 /* This is a surprisingly common case, so do it first. */
1316 if (op1
== 0 || op2
== 0)
1323 if (sgn
== UNSIGNED
)
1325 /* If the inputs are single HWIs and the output has room for at
1326 least two HWIs, we can use umul_ppmm directly. */
1327 if (prec
>= HOST_BITS_PER_WIDE_INT
* 2
1328 && wi::fits_uhwi_p (op1
)
1329 && wi::fits_uhwi_p (op2
))
1331 /* This case never overflows. */
1337 umul_ppmm (val
[1], val
[0], op1
.ulow (), op2
.ulow ());
1338 if (val
[1] < 0 && prec
> HOST_BITS_PER_WIDE_INT
* 2)
1343 return 1 + (val
[1] != 0 || val
[0] < 0);
1345 /* Likewise if the output is a full single HWI, except that the
1346 upper HWI of the result is only used for determining overflow.
1347 (We handle this case inline when overflow isn't needed.) */
1348 else if (prec
== HOST_BITS_PER_WIDE_INT
)
1350 unsigned HOST_WIDE_INT upper
;
1351 umul_ppmm (upper
, val
[0], op1
.ulow (), op2
.ulow ());
1353 /* Unsigned overflow can only be +OVERFLOW. */
1354 *overflow
= (upper
!= 0) ? wi::OVF_OVERFLOW
: wi::OVF_NONE
;
1362 /* Handle multiplications by 1. */
1367 val
[0] = wi::neg_p (op2
, sgn
) ? -1 : 0;
1370 for (i
= 0; i
< op2len
; i
++)
1378 val
[0] = wi::neg_p (op1
, sgn
) ? -1 : 0;
1381 for (i
= 0; i
< op1len
; i
++)
1386 /* If we need to check for overflow, we can only do half wide
1387 multiplies quickly because we need to look at the top bits to
1388 check for the overflow. */
1389 if ((high
|| needs_overflow
)
1390 && (prec
<= HOST_BITS_PER_HALF_WIDE_INT
))
1392 unsigned HOST_WIDE_INT r
;
1396 o0
= op1
.to_shwi ();
1397 o1
= op2
.to_shwi ();
1401 o0
= op1
.to_uhwi ();
1402 o1
= op2
.to_uhwi ();
1410 if ((HOST_WIDE_INT
) r
!= sext_hwi (r
, prec
))
1411 /* FIXME: Signed overflow type is not implemented yet. */
1412 *overflow
= OVF_UNKNOWN
;
1416 if ((r
>> prec
) != 0)
1417 /* Unsigned overflow can only be +OVERFLOW. */
1418 *overflow
= OVF_OVERFLOW
;
1421 val
[0] = high
? r
>> prec
: r
;
1425 /* We do unsigned mul and then correct it. */
1426 wi_unpack (u
, op1val
, op1len
, half_blocks_needed
, prec
, SIGNED
);
1427 wi_unpack (v
, op2val
, op2len
, half_blocks_needed
, prec
, SIGNED
);
1429 /* The 2 is for a full mult. */
1430 memset (r
, 0, half_blocks_needed
* 2
1431 * HOST_BITS_PER_HALF_WIDE_INT
/ CHAR_BIT
);
1433 for (j
= 0; j
< half_blocks_needed
; j
++)
1436 for (i
= 0; i
< half_blocks_needed
; i
++)
1438 t
= ((unsigned HOST_WIDE_INT
)u
[i
] * (unsigned HOST_WIDE_INT
)v
[j
]
1440 r
[i
+ j
] = t
& HALF_INT_MASK
;
1441 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1443 r
[j
+ half_blocks_needed
] = k
;
1446 /* We did unsigned math above. For signed we must adjust the
1447 product (assuming we need to see that). */
1448 if (sgn
== SIGNED
&& (high
|| needs_overflow
))
1450 unsigned HOST_WIDE_INT b
;
1451 if (wi::neg_p (op1
))
1454 for (i
= 0; i
< half_blocks_needed
; i
++)
1456 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1457 - (unsigned HOST_WIDE_INT
)v
[i
] - b
;
1458 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1459 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1462 if (wi::neg_p (op2
))
1465 for (i
= 0; i
< half_blocks_needed
; i
++)
1467 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1468 - (unsigned HOST_WIDE_INT
)u
[i
] - b
;
1469 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1470 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
  /* For unsigned, overflow is true if any of the top bits are set.
     For signed, overflow is true if any of the top bits are not equal
     to the sign bit.  */
1482 if (sgn
== UNSIGNED
)
1486 top
= r
[(half_blocks_needed
) - 1];
1487 top
= SIGN_MASK (top
<< (HOST_BITS_PER_WIDE_INT
/ 2));
1491 for (i
= half_blocks_needed
; i
< half_blocks_needed
* 2; i
++)
1492 if (((HOST_WIDE_INT
)(r
[i
] & mask
)) != top
)
1493 /* FIXME: Signed overflow type is not implemented yet. */
1494 *overflow
= (sgn
== UNSIGNED
) ? wi::OVF_OVERFLOW
: wi::OVF_UNKNOWN
;
1497 int r_offset
= high
? half_blocks_needed
: 0;
1498 return wi_pack (val
, &r
[r_offset
], half_blocks_needed
, prec
);
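/* Illustrative example via the public wi::mul overload that takes an
   overflow output.  With an 8-bit precision:

     wi::overflow_type ovf;
     wide_int p = wi::mul (wi::shwi (20, 8), wi::shwi (10, 8), SIGNED, &ovf);
     // p is the 8-bit wrap of 200, i.e. -56, and ovf is set
     // (to wi::OVF_UNKNOWN, per the FIXME above).  */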
1501 /* Compute the population count of X. */
1503 wi::popcount (const wide_int_ref
&x
)
1508 /* The high order block is special if it is the last block and the
1509 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1510 have to clear out any ones above the precision before doing
1511 popcount on this block. */
1512 count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1513 unsigned int stop
= x
.len
;
1516 count
= popcount_hwi (x
.uhigh () << -count
);
1521 if (x
.sign_mask () >= 0)
1525 for (i
= 0; i
< stop
; ++i
)
1526 count
+= popcount_hwi (x
.val
[i
]);
1531 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1532 whether the result overflows when OP0 and OP1 are treated as having
1533 signedness SGN. Return the number of blocks in VAL. */
1535 wi::sub_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1536 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1537 unsigned int op1len
, unsigned int prec
,
1538 signop sgn
, wi::overflow_type
*overflow
)
1540 unsigned HOST_WIDE_INT o0
= 0;
1541 unsigned HOST_WIDE_INT o1
= 0;
1542 unsigned HOST_WIDE_INT x
= 0;
1543 /* We implement subtraction as an in place negate and add. Negation
1544 is just inversion and add 1, so we can do the add of 1 by just
1545 starting the borrow in of the first element at 1. */
1546 unsigned HOST_WIDE_INT borrow
= 0;
1547 unsigned HOST_WIDE_INT old_borrow
= 0;
1549 unsigned HOST_WIDE_INT mask0
, mask1
;
1552 unsigned int len
= MAX (op0len
, op1len
);
1553 mask0
= -top_bit_of (op0
, op0len
, prec
);
1554 mask1
= -top_bit_of (op1
, op1len
, prec
);
1556 /* Subtract all of the explicitly defined elements. */
1557 for (i
= 0; i
< len
; i
++)
1559 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
)op0
[i
] : mask0
;
1560 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
)op1
[i
] : mask1
;
1561 x
= o0
- o1
- borrow
;
1563 old_borrow
= borrow
;
1564 borrow
= borrow
== 0 ? o0
< o1
: o0
<= o1
;
1567 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1569 val
[len
] = mask0
- mask1
- borrow
;
1572 *overflow
= (sgn
== UNSIGNED
&& borrow
) ? OVF_UNDERFLOW
: OVF_NONE
;
1576 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1579 unsigned HOST_WIDE_INT x
= (o0
^ o1
) & (val
[len
- 1] ^ o0
);
1580 if ((HOST_WIDE_INT
) (x
<< shift
) < 0)
1583 *overflow
= OVF_UNDERFLOW
;
1585 *overflow
= OVF_OVERFLOW
;
1587 *overflow
= OVF_NONE
;
1590 *overflow
= OVF_NONE
;
  /* Put the MSB of X and O0 in the top of the HWI.  */
1598 *overflow
= (x
>= o0
) ? OVF_UNDERFLOW
: OVF_NONE
;
1600 *overflow
= (x
> o0
) ? OVF_UNDERFLOW
: OVF_NONE
;
1604 return canonize (val
, len
, prec
);
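/* Illustrative example: unsigned wrap-around is reported as underflow.
   With an 8-bit precision:

     wi::overflow_type ovf;
     wide_int d = wi::sub (wi::uhwi (0, 8), wi::uhwi (1, 8), UNSIGNED, &ovf);
     // d is 255 (the 8-bit wrap of -1) and ovf is wi::OVF_UNDERFLOW.  */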
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of B_DIVIDEND
   (Knuth's U); however, there needs to be at least one extra element
   of B_DIVIDEND allocated.  N is the number of elements of B_DIVISOR.  */
1619 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT
*b_quotient
,
1620 unsigned HOST_HALF_WIDE_INT
*b_remainder
,
1621 unsigned HOST_HALF_WIDE_INT
*b_dividend
,
1622 unsigned HOST_HALF_WIDE_INT
*b_divisor
,
  /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of a
     HOST_WIDE_INT and is stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit machines.  */
1629 unsigned HOST_WIDE_INT b
1630 = (unsigned HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
;
1631 unsigned HOST_WIDE_INT qhat
; /* Estimate of quotient digit. */
1632 unsigned HOST_WIDE_INT rhat
; /* A remainder. */
1633 unsigned HOST_WIDE_INT p
; /* Product of two digits. */
1637 /* Single digit divisor. */
1641 for (j
= m
- 1; j
>= 0; j
--)
1643 b_quotient
[j
] = (k
* b
+ b_dividend
[j
])/b_divisor
[0];
1644 k
= ((k
* b
+ b_dividend
[j
])
1645 - ((unsigned HOST_WIDE_INT
)b_quotient
[j
]
1646 * (unsigned HOST_WIDE_INT
)b_divisor
[0]));
1652 s
= clz_hwi (b_divisor
[n
-1]) - HOST_BITS_PER_HALF_WIDE_INT
; /* CHECK clz */
1656 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1657 algorithm, we can overwrite b_dividend and b_divisor, so we do
1659 for (i
= n
- 1; i
> 0; i
--)
1660 b_divisor
[i
] = (b_divisor
[i
] << s
)
1661 | (b_divisor
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1662 b_divisor
[0] = b_divisor
[0] << s
;
1664 b_dividend
[m
] = b_dividend
[m
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
);
1665 for (i
= m
- 1; i
> 0; i
--)
1666 b_dividend
[i
] = (b_dividend
[i
] << s
)
1667 | (b_dividend
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1668 b_dividend
[0] = b_dividend
[0] << s
;
1672 for (j
= m
- n
; j
>= 0; j
--)
1674 qhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) / b_divisor
[n
-1];
1675 rhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) - qhat
* b_divisor
[n
-1];
1677 if (qhat
>= b
|| qhat
* b_divisor
[n
-2] > b
* rhat
+ b_dividend
[j
+n
-2])
1680 rhat
+= b_divisor
[n
-1];
1685 /* Multiply and subtract. */
1687 for (i
= 0; i
< n
; i
++)
1689 p
= qhat
* b_divisor
[i
];
1690 t
= b_dividend
[i
+j
] - k
- (p
& HALF_INT_MASK
);
1691 b_dividend
[i
+ j
] = t
;
1692 k
= ((p
>> HOST_BITS_PER_HALF_WIDE_INT
)
1693 - (t
>> HOST_BITS_PER_HALF_WIDE_INT
));
1695 t
= b_dividend
[j
+n
] - k
;
1696 b_dividend
[j
+n
] = t
;
1698 b_quotient
[j
] = qhat
;
1703 for (i
= 0; i
< n
; i
++)
1705 t
= (HOST_WIDE_INT
)b_dividend
[i
+j
] + b_divisor
[i
] + k
;
1706 b_dividend
[i
+j
] = t
;
1707 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1709 b_dividend
[j
+n
] += k
;
1713 for (i
= 0; i
< n
; i
++)
1714 b_remainder
[i
] = (b_dividend
[i
] >> s
)
1715 | (b_dividend
[i
+1] << (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1717 for (i
= 0; i
< n
; i
++)
1718 b_remainder
[i
] = b_dividend
[i
];
1722 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1723 the result. If QUOTIENT is nonnull, store the value of the quotient
1724 there and return the number of blocks in it. The return value is
1725 not defined otherwise. If REMAINDER is nonnull, store the value
1726 of the remainder there and store the number of blocks in
1727 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1728 the division overflowed. */
1730 wi::divmod_internal (HOST_WIDE_INT
*quotient
, unsigned int *remainder_len
,
1731 HOST_WIDE_INT
*remainder
,
1732 const HOST_WIDE_INT
*dividend_val
,
1733 unsigned int dividend_len
, unsigned int dividend_prec
,
1734 const HOST_WIDE_INT
*divisor_val
, unsigned int divisor_len
,
1735 unsigned int divisor_prec
, signop sgn
,
1736 wi::overflow_type
*oflow
)
1738 unsigned int dividend_blocks_needed
= 2 * BLOCKS_NEEDED (dividend_prec
);
1739 unsigned int divisor_blocks_needed
= 2 * BLOCKS_NEEDED (divisor_prec
);
1740 unsigned HOST_HALF_WIDE_INT
1741 b_quotient
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1742 unsigned HOST_HALF_WIDE_INT
1743 b_remainder
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1744 unsigned HOST_HALF_WIDE_INT
1745 b_dividend
[(4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
) + 1];
1746 unsigned HOST_HALF_WIDE_INT
1747 b_divisor
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1749 bool dividend_neg
= false;
1750 bool divisor_neg
= false;
1751 bool overflow
= false;
1752 wide_int neg_dividend
, neg_divisor
;
1754 wide_int_ref dividend
= wi::storage_ref (dividend_val
, dividend_len
,
1756 wide_int_ref divisor
= wi::storage_ref (divisor_val
, divisor_len
,
1761 /* The smallest signed number / -1 causes overflow. The dividend_len
1762 check is for speed rather than correctness. */
1764 && dividend_len
== BLOCKS_NEEDED (dividend_prec
)
1766 && wi::only_sign_bit_p (dividend
))
1769 /* Handle the overflow cases. Viewed as unsigned value, the quotient of
   (signed min / -1) has the same representation as the original dividend.
1771 We have traditionally made division by zero act as division by one,
1772 so there too we use the original dividend. */
1781 *oflow
= OVF_OVERFLOW
;
1783 for (unsigned int i
= 0; i
< dividend_len
; ++i
)
1784 quotient
[i
] = dividend_val
[i
];
1785 return dividend_len
;
1791 /* Do it on the host if you can. */
1793 && wi::fits_shwi_p (dividend
)
1794 && wi::fits_shwi_p (divisor
))
1796 HOST_WIDE_INT o0
= dividend
.to_shwi ();
1797 HOST_WIDE_INT o1
= divisor
.to_shwi ();
1799 if (o0
== HOST_WIDE_INT_MIN
&& o1
== -1)
1801 gcc_checking_assert (dividend_prec
> HOST_BITS_PER_WIDE_INT
);
1804 quotient
[0] = HOST_WIDE_INT_MIN
;
1817 quotient
[0] = o0
/ o1
;
1820 remainder
[0] = o0
% o1
;
1828 && wi::fits_uhwi_p (dividend
)
1829 && wi::fits_uhwi_p (divisor
))
1831 unsigned HOST_WIDE_INT o0
= dividend
.to_uhwi ();
1832 unsigned HOST_WIDE_INT o1
= divisor
.to_uhwi ();
1833 unsigned int quotient_len
= 1;
1837 quotient
[0] = o0
/ o1
;
1838 quotient_len
= canonize_uhwi (quotient
, dividend_prec
);
1842 remainder
[0] = o0
% o1
;
1843 *remainder_len
= canonize_uhwi (remainder
, dividend_prec
);
1845 return quotient_len
;
  /* Make the divisor and dividend positive and remember what we
     did.  */
1852 if (wi::neg_p (dividend
))
1854 neg_dividend
= -dividend
;
1855 dividend
= neg_dividend
;
1856 dividend_neg
= true;
1858 if (wi::neg_p (divisor
))
1860 neg_divisor
= -divisor
;
1861 divisor
= neg_divisor
;
1866 wi_unpack (b_dividend
, dividend
.get_val (), dividend
.get_len (),
1867 dividend_blocks_needed
, dividend_prec
, sgn
);
1868 wi_unpack (b_divisor
, divisor
.get_val (), divisor
.get_len (),
1869 divisor_blocks_needed
, divisor_prec
, sgn
);
1871 m
= dividend_blocks_needed
;
1873 while (m
> 1 && b_dividend
[m
- 1] == 0)
1876 n
= divisor_blocks_needed
;
1877 while (n
> 1 && b_divisor
[n
- 1] == 0)
1880 memset (b_quotient
, 0, sizeof (b_quotient
));
1882 divmod_internal_2 (b_quotient
, b_remainder
, b_dividend
, b_divisor
, m
, n
);
1884 unsigned int quotient_len
= 0;
1887 quotient_len
= wi_pack (quotient
, b_quotient
, m
, dividend_prec
);
  /* The quotient is negative if exactly one of the divisor or dividend is
     negative.  */
1890 if (dividend_neg
!= divisor_neg
)
1891 quotient_len
= wi::sub_large (quotient
, zeros
, 1, quotient
,
1892 quotient_len
, dividend_prec
,
1898 *remainder_len
= wi_pack (remainder
, b_remainder
, n
, dividend_prec
);
1899 /* The remainder is always the same sign as the dividend. */
1901 *remainder_len
= wi::sub_large (remainder
, zeros
, 1, remainder
,
1902 *remainder_len
, dividend_prec
,
1906 return quotient_len
;
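/* Illustrative example via the public truncating wrappers built on
   divmod_internal; quotient and remainder keep the usual C identity:

     wide_int q = wi::div_trunc (wi::shwi (-7, 32), wi::shwi (2, 32), SIGNED);
     wide_int r = wi::mod_trunc (wi::shwi (-7, 32), wi::shwi (2, 32), SIGNED);
     // q is -3, r is -1, and q * 2 + r == -7.  */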
1910 * Shifting, rotating and extraction.
1913 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1914 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1916 wi::lshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1917 unsigned int xlen
, unsigned int precision
,
1920 /* Split the shift into a whole-block shift and a subblock shift. */
1921 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1922 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1924 /* The whole-block shift fills with zeros. */
1925 unsigned int len
= BLOCKS_NEEDED (precision
);
1926 for (unsigned int i
= 0; i
< skip
; ++i
)
1929 /* It's easier to handle the simple block case specially. */
1930 if (small_shift
== 0)
1931 for (unsigned int i
= skip
; i
< len
; ++i
)
1932 val
[i
] = safe_uhwi (xval
, xlen
, i
- skip
);
1935 /* The first unfilled output block is a left shift of the first
1936 block in XVAL. The other output blocks contain bits from two
1937 consecutive input blocks. */
1938 unsigned HOST_WIDE_INT carry
= 0;
1939 for (unsigned int i
= skip
; i
< len
; ++i
)
1941 unsigned HOST_WIDE_INT x
= safe_uhwi (xval
, xlen
, i
- skip
);
1942 val
[i
] = (x
<< small_shift
) | carry
;
1943 carry
= x
>> (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1946 return canonize (val
, len
, precision
);
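/* Illustrative example: shifting by more than a block moves whole blocks
   and then handles the remaining bits:

     wide_int x = wi::lshift (wi::uhwi (1, 128), 100);
     // Only bit 100 of x is set; with 64-bit blocks its length is 2.  */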
1949 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1950 number of blocks in VAL. The input has XPRECISION bits and the
1951 output has XPRECISION - SHIFT bits. */
1953 rshift_large_common (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1954 unsigned int xlen
, unsigned int xprecision
,
1957 /* Split the shift into a whole-block shift and a subblock shift. */
1958 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1959 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1961 /* Work out how many blocks are needed to store the significant bits
1962 (excluding the upper zeros or signs). */
1963 unsigned int len
= BLOCKS_NEEDED (xprecision
- shift
);
1965 /* It's easier to handle the simple block case specially. */
1966 if (small_shift
== 0)
1967 for (unsigned int i
= 0; i
< len
; ++i
)
1968 val
[i
] = safe_uhwi (xval
, xlen
, i
+ skip
);
1971 /* Each output block but the last is a combination of two input blocks.
1972 The last block is a right shift of the last block in XVAL. */
1973 unsigned HOST_WIDE_INT curr
= safe_uhwi (xval
, xlen
, skip
);
1974 for (unsigned int i
= 0; i
< len
; ++i
)
1976 val
[i
] = curr
>> small_shift
;
1977 curr
= safe_uhwi (xval
, xlen
, i
+ skip
+ 1);
1978 val
[i
] |= curr
<< (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1984 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1985 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1986 VAL has PRECISION bits. */
1988 wi::lrshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1989 unsigned int xlen
, unsigned int xprecision
,
1990 unsigned int precision
, unsigned int shift
)
1992 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1994 /* The value we just created has precision XPRECISION - SHIFT.
1995 Zero-extend it to wider precisions. */
1996 if (precision
> xprecision
- shift
)
1998 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
2000 val
[len
- 1] = zext_hwi (val
[len
- 1], small_prec
);
2001 else if (val
[len
- 1] < 0)
2003 /* Add a new block with a zero. */
2008 return canonize (val
, len
, precision
);
2011 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2012 Return the number of blocks in VAL. XVAL has XPRECISION bits and
2013 VAL has PRECISION bits. */
2015 wi::arshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
2016 unsigned int xlen
, unsigned int xprecision
,
2017 unsigned int precision
, unsigned int shift
)
2019 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
2021 /* The value we just created has precision XPRECISION - SHIFT.
2022 Sign-extend it to wider types. */
2023 if (precision
> xprecision
- shift
)
2025 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
2027 val
[len
- 1] = sext_hwi (val
[len
- 1], small_prec
);
2029 return canonize (val
, len
, precision
);
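/* Illustrative example: the logical and arithmetic right shifts differ
   only in what is shifted in at the top.  With a 32-bit precision:

     wide_int x = wi::shwi (-16, 32);   // pattern 0xfffffff0
     wi::arshift (x, 2);   // -4 (0xfffffffc)
     wi::lrshift (x, 2);   // 0x3ffffffc  */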
2032 /* Return the number of leading (upper) zeros in X. */
2034 wi::clz (const wide_int_ref
&x
)
  /* Calculate how many bits there are above the highest represented block.  */
2037 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2039 unsigned HOST_WIDE_INT high
= x
.uhigh ();
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
2043 high
= (high
<< -count
) >> -count
;
2044 else if (x
.sign_mask () < 0)
2045 /* The upper bit is set, so there are no leading zeros. */
2048 /* We don't need to look below HIGH. Either HIGH is nonzero,
2049 or the top bit of the block below is nonzero; clz_hwi is
2050 HOST_BITS_PER_WIDE_INT in the latter case. */
2051 return count
+ clz_hwi (high
);
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
2058 wi::clrsb (const wide_int_ref
&x
)
  /* Calculate how many bits there are above the highest represented block.  */
2061 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
2063 unsigned HOST_WIDE_INT high
= x
.uhigh ();
2064 unsigned HOST_WIDE_INT mask
= -1;
2067 /* The upper -COUNT bits of HIGH are not part of the value.
2068 Clear them from both MASK and HIGH. */
2073 /* If the top bit is 1, count the number of leading 1s. If the top
2074 bit is zero, count the number of leading zeros. */
2075 if (high
> mask
/ 2)
  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is zero.  */
2081 return count
+ clz_hwi (high
) - 1;
2084 /* Return the number of trailing (lower) zeros in X. */
2086 wi::ctz (const wide_int_ref
&x
)
2088 if (x
.len
== 1 && x
.ulow () == 0)
2091 /* Having dealt with the zero case, there must be a block with a
2092 nonzero bit. We don't care about the bits above the first 1. */
2094 while (x
.val
[i
] == 0)
2096 return i
* HOST_BITS_PER_WIDE_INT
+ ctz_hwi (x
.val
[i
]);
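/* Illustrative examples of the bit-scan helpers, all counted within the
   stated precision:

     wi::clz (wi::uhwi (1, 64));      // 63
     wi::ctz (wi::uhwi (8, 32));      // 3
     wi::clrsb (wi::shwi (-1, 32));   // 31
     wi::clrsb (wi::shwi (1, 32));    // 30  */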
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
2102 wi::exact_log2 (const wide_int_ref
&x
)
2104 /* Reject cases where there are implicit -1 blocks above HIGH. */
2105 if (x
.len
* HOST_BITS_PER_WIDE_INT
< x
.precision
&& x
.sign_mask () < 0)
2108 /* Set CRUX to the index of the entry that should be nonzero.
2109 If the top block is zero then the next lowest block (if any)
2110 must have the high bit set. */
2111 unsigned int crux
= x
.len
- 1;
2112 if (crux
> 0 && x
.val
[crux
] == 0)
2115 /* Check that all lower blocks are zero. */
2116 for (unsigned int i
= 0; i
< crux
; ++i
)
2120 /* Get a zero-extended form of block CRUX. */
2121 unsigned HOST_WIDE_INT hwi
= x
.val
[crux
];
2122 if ((crux
+ 1) * HOST_BITS_PER_WIDE_INT
> x
.precision
)
2123 hwi
= zext_hwi (hwi
, x
.precision
% HOST_BITS_PER_WIDE_INT
);
2125 /* Now it's down to whether HWI is a power of 2. */
2126 int res
= ::exact_log2 (hwi
);
2128 res
+= crux
* HOST_BITS_PER_WIDE_INT
;
2132 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2134 wi::floor_log2 (const wide_int_ref
&x
)
2136 return x
.precision
- 1 - clz (x
);
2139 /* Return the index of the first (lowest) set bit in X, counting from 1.
2140 Return 0 if X is 0. */
2142 wi::ffs (const wide_int_ref
&x
)
2144 return eq_p (x
, 0) ? 0 : ctz (x
) + 1;
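/* Illustrative examples:

     wi::exact_log2 (wi::uhwi (64, 128));  // 6
     wi::exact_log2 (wi::uhwi (6, 128));   // -1: not a power of 2
     wi::floor_log2 (wi::uhwi (6, 128));   // 2
     wi::ffs (wi::shwi (8, 32));           // 4: lowest set bit, counted from 1
     wi::ffs (wi::shwi (0, 32));           // 0  */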
2147 /* Return true if sign-extending X to have precision PRECISION would give
2148 the minimum signed value at that precision. */
2150 wi::only_sign_bit_p (const wide_int_ref
&x
, unsigned int precision
)
2152 return ctz (x
) + 1 == int (precision
);
2155 /* Return true if X represents the minimum signed value. */
2157 wi::only_sign_bit_p (const wide_int_ref
&x
)
2159 return only_sign_bit_p (x
, x
.precision
);
2162 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2163 down to the previous value that has no bits set outside MASK.
2164 This rounding wraps for signed values if VAL is negative and
2165 the top bit of MASK is clear.
2167 For example, round_down_for_mask (6, 0xf1) would give 1 and
2168 round_down_for_mask (24, 0xf1) would give 17. */
2171 wi::round_down_for_mask (const wide_int
&val
, const wide_int
&mask
)
2173 /* Get the bits in VAL that are outside the mask. */
2174 wide_int extra_bits
= wi::bit_and_not (val
, mask
);
2175 if (extra_bits
== 0)
2178 /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2180 unsigned int precision
= val
.get_precision ();
2181 wide_int lower_mask
= wi::mask (precision
- wi::clz (extra_bits
),
2184 /* Clear the bits that aren't in MASK, but ensure that all bits
2185 in MASK below the top cleared bit are set. */
2186 return (val
& mask
) | (mask
& lower_mask
);
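/* Worked example for round_down_for_mask (24, 0xf1) == 17: VAL is 0b11000
   and MASK is 0b11110001.  The bits of VAL outside MASK are 0b1000, so
   LOWER_MASK is 0b1111 and the result is
   (VAL & MASK) | (MASK & LOWER_MASK) = 0b10000 | 0b1 = 17.  */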
2189 /* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
2190 up to the next value that has no bits set outside MASK. The rounding
2191 wraps if there are no suitable values greater than VAL.
2193 For example, round_up_for_mask (6, 0xf1) would give 16 and
2194 round_up_for_mask (24, 0xf1) would give 32. */
2197 wi::round_up_for_mask (const wide_int
&val
, const wide_int
&mask
)
2199 /* Get the bits in VAL that are outside the mask. */
2200 wide_int extra_bits
= wi::bit_and_not (val
, mask
);
2201 if (extra_bits
== 0)
2204 /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
2205 unsigned int precision
= val
.get_precision ();
2206 wide_int upper_mask
= wi::mask (precision
- wi::clz (extra_bits
),
2209 /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
2212 /* Conceptually we need to:
2214 - clear bits of VAL outside UPPER_MASK
2215 - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2216 - propagate the carry through the bits of VAL in UPPER_MASK
2218 If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2219 reaches that bit and the process leaves all lower bits clear.
2220 If (~VAL & UPPER_MASK) is zero then the result is also zero. */
2221 wide_int tmp
= wi::bit_and_not (upper_mask
, val
);
2223 return (val
| tmp
) & -tmp
;
2227 * Private utilities.
2230 void gt_ggc_mx (widest_int
*) { }
2231 void gt_pch_nx (widest_int
*, void (*) (void *, void *), void *) { }
2232 void gt_pch_nx (widest_int
*) { }
2234 template void wide_int::dump () const;
2235 template void generic_wide_int
<wide_int_ref_storage
<false> >::dump () const;
2236 template void generic_wide_int
<wide_int_ref_storage
<true> >::dump () const;
2237 template void offset_int::dump () const;
2238 template void widest_int::dump () const;
2240 /* We could add all the above ::dump variants here, but wide_int and
2241 widest_int should handle the common cases. Besides, you can always
2242 call the dump method directly. */
2245 debug (const wide_int
&ref
)
2251 debug (const wide_int
*ptr
)
2256 fprintf (stderr
, "<nil>\n");
2260 debug (const widest_int
&ref
)
2266 debug (const widest_int
*ptr
)
2271 fprintf (stderr
, "<nil>\n");
2276 namespace selftest
{
2278 /* Selftests for wide ints. We run these multiple times, once per type. */
2280 /* Helper function for building a test value. */
2282 template <class VALUE_TYPE
>
2286 /* Specializations of the fixture for each wide-int type. */
2288 /* Specialization for VALUE_TYPE == wide_int. */
2294 return wi::shwi (i
, 32);
2297 /* Specialization for VALUE_TYPE == offset_int. */
2303 return offset_int (i
);
2306 /* Specialization for VALUE_TYPE == widest_int. */
2312 return widest_int (i
);
2315 /* Verify that print_dec (WI, ..., SGN) gives the expected string
2316 representation (using base 10). */
2319 assert_deceq (const char *expected
, const wide_int_ref
&wi
, signop sgn
)
2321 char buf
[WIDE_INT_PRINT_BUFFER_SIZE
];
2322 print_dec (wi
, buf
, sgn
);
2323 ASSERT_STREQ (expected
, buf
);
2326 /* Likewise for base 16. */
2329 assert_hexeq (const char *expected
, const wide_int_ref
&wi
)
2331 char buf
[WIDE_INT_PRINT_BUFFER_SIZE
];
2332 print_hex (wi
, buf
);
2333 ASSERT_STREQ (expected
, buf
);
2338 /* Verify that print_dec and print_hex work for VALUE_TYPE. */
2340 template <class VALUE_TYPE
>
2344 VALUE_TYPE a
= from_int
<VALUE_TYPE
> (42);
2345 assert_deceq ("42", a
, SIGNED
);
2346 assert_hexeq ("0x2a", a
);
2347 assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2348 assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2349 assert_hexeq ("0xffffffffffffffff", wi::mask
<widest_int
> (64, false));
2350 if (WIDE_INT_MAX_PRECISION
> 128)
2352 assert_hexeq ("0x20000000000000000fffffffffffffffe",
2353 wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2354 assert_hexeq ("0x200000000000004000123456789abcdef",
2355 wi::lshift (1, 129) + wi::lshift (1, 74)
2356 + wi::lshift (0x1234567, 32) + 0x89abcdef);
2360 /* Verify that various operations work correctly for VALUE_TYPE,
2361 unary and binary, using both function syntax, and
2362 overloaded-operators. */
2364 template <class VALUE_TYPE
>
2368 VALUE_TYPE a
= from_int
<VALUE_TYPE
> (7);
2369 VALUE_TYPE b
= from_int
<VALUE_TYPE
> (3);
2371 /* Using functions. */
2372 assert_deceq ("-7", wi::neg (a
), SIGNED
);
2373 assert_deceq ("10", wi::add (a
, b
), SIGNED
);
2374 assert_deceq ("4", wi::sub (a
, b
), SIGNED
);
2375 assert_deceq ("-4", wi::sub (b
, a
), SIGNED
);
2376 assert_deceq ("21", wi::mul (a
, b
), SIGNED
);
2378 /* Using operators. */
2379 assert_deceq ("-7", -a
, SIGNED
);
2380 assert_deceq ("10", a
+ b
, SIGNED
);
2381 assert_deceq ("4", a
- b
, SIGNED
);
2382 assert_deceq ("-4", b
- a
, SIGNED
);
2383 assert_deceq ("21", a
* b
, SIGNED
);
2386 /* Verify that various comparisons work correctly for VALUE_TYPE. */
2388 template <class VALUE_TYPE
>
2392 VALUE_TYPE a
= from_int
<VALUE_TYPE
> (7);
2393 VALUE_TYPE b
= from_int
<VALUE_TYPE
> (3);
2396 ASSERT_TRUE (wi::eq_p (a
, a
));
2397 ASSERT_FALSE (wi::eq_p (a
, b
));
2400 ASSERT_TRUE (wi::ne_p (a
, b
));
2401 ASSERT_FALSE (wi::ne_p (a
, a
));
2404 ASSERT_FALSE (wi::lts_p (a
, a
));
2405 ASSERT_FALSE (wi::lts_p (a
, b
));
2406 ASSERT_TRUE (wi::lts_p (b
, a
));
2409 ASSERT_TRUE (wi::les_p (a
, a
));
2410 ASSERT_FALSE (wi::les_p (a
, b
));
2411 ASSERT_TRUE (wi::les_p (b
, a
));
2414 ASSERT_FALSE (wi::gts_p (a
, a
));
2415 ASSERT_TRUE (wi::gts_p (a
, b
));
2416 ASSERT_FALSE (wi::gts_p (b
, a
));
2419 ASSERT_TRUE (wi::ges_p (a
, a
));
2420 ASSERT_TRUE (wi::ges_p (a
, b
));
2421 ASSERT_FALSE (wi::ges_p (b
, a
));
2424 ASSERT_EQ (-1, wi::cmps (b
, a
));
2425 ASSERT_EQ (0, wi::cmps (a
, a
));
2426 ASSERT_EQ (1, wi::cmps (a
, b
));
2429 /* Run all of the selftests, using the given VALUE_TYPE. */
2431 template <class VALUE_TYPE
>
2432 static void run_all_wide_int_tests ()
2434 test_printing
<VALUE_TYPE
> ();
2435 test_ops
<VALUE_TYPE
> ();
2436 test_comparisons
<VALUE_TYPE
> ();
2439 /* Test overflow conditions. */
2444 static int precs
[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2445 static int offsets
[] = { 16, 1, 0 };
2446 for (unsigned int i
= 0; i
< ARRAY_SIZE (precs
); ++i
)
2447 for (unsigned int j
= 0; j
< ARRAY_SIZE (offsets
); ++j
)
2449 int prec
= precs
[i
];
2450 int offset
= offsets
[j
];
2451 wi::overflow_type overflow
;
2454 sum
= wi::add (wi::max_value (prec
, UNSIGNED
) - offset
, 1,
2455 UNSIGNED
, &overflow
);
2456 ASSERT_EQ (sum
, -offset
);
2457 ASSERT_EQ (overflow
!= wi::OVF_NONE
, offset
== 0);
2459 sum
= wi::add (1, wi::max_value (prec
, UNSIGNED
) - offset
,
2460 UNSIGNED
, &overflow
);
2461 ASSERT_EQ (sum
, -offset
);
2462 ASSERT_EQ (overflow
!= wi::OVF_NONE
, offset
== 0);
2464 diff
= wi::sub (wi::max_value (prec
, UNSIGNED
) - offset
,
2465 wi::max_value (prec
, UNSIGNED
),
2466 UNSIGNED
, &overflow
);
2467 ASSERT_EQ (diff
, -offset
);
2468 ASSERT_EQ (overflow
!= wi::OVF_NONE
, offset
!= 0);
2470 diff
= wi::sub (wi::max_value (prec
, UNSIGNED
) - offset
,
2471 wi::max_value (prec
, UNSIGNED
) - 1,
2472 UNSIGNED
, &overflow
);
2473 ASSERT_EQ (diff
, 1 - offset
);
2474 ASSERT_EQ (overflow
!= wi::OVF_NONE
, offset
> 1);
2478 /* Test the round_{down,up}_for_mask functions. */
2481 test_round_for_mask ()
2483 unsigned int prec
= 18;
2484 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec
),
2485 wi::shwi (0xf1, prec
)));
2486 ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec
),
2487 wi::shwi (0xf1, prec
)));
2489 ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec
),
2490 wi::shwi (0xf1, prec
)));
2491 ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec
),
2492 wi::shwi (0xf1, prec
)));
2494 ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec
),
2495 wi::shwi (0xf1, prec
)));
2496 ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec
),
2497 wi::shwi (0xf1, prec
)));
2499 ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec
),
2500 wi::shwi (0x111, prec
)));
2501 ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec
),
2502 wi::shwi (0x111, prec
)));
2504 ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec
),
2505 wi::shwi (0xfc, prec
)));
2506 ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec
),
2507 wi::shwi (0xfc, prec
)));
2509 ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec
),
2510 wi::shwi (0xabc, prec
)));
2511 ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec
),
2512 wi::shwi (0xabc, prec
)));
2514 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec
),
2515 wi::shwi (0xabc, prec
)));
2516 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec
),
2517 wi::shwi (0xabc, prec
)));
2519 ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec
),
2520 wi::shwi (0xabc, prec
)));
2521 ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec
),
2522 wi::shwi (0xabc, prec
)));
2525 /* Run all of the selftests within this file, for all value types. */
2528 wide_int_cc_tests ()
2530 run_all_wide_int_tests
<wide_int
> ();
2531 run_all_wide_int_tests
<offset_int
> ();
2532 run_all_wide_int_tests
<widest_int
> ();
2534 test_round_for_mask ();
2537 } // namespace selftest
2538 #endif /* CHECKING_P */