/* Operations with very long integers.
   Copyright (C) 2012-2017 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "selftest.h"
#include "wide-int-print.h"

#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
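
/* A worked example of the block arithmetic above (illustrative only,
   assuming 64-bit HOST_WIDE_INTs):

     BLOCKS_NEEDED (0)   == 1
     BLOCKS_NEEDED (64)  == 1
     BLOCKS_NEEDED (65)  == 2
     BLOCKS_NEEDED (128) == 2
     SIGN_MASK (0x8000000000000000) == -1
     SIGN_MASK (0x1)                == 0  */
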
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   depending on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit block i does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
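
/* An illustrative example (not part of the original sources): with
   64-bit blocks and PRECISION == 128, the value -1 stored as the two
   blocks { -1, -1 } canonizes to the single block { -1 }, and 1 stored
   as { 1, 0 } canonizes to { 1 }; in both cases the dropped upper block
   is implied by sign extension of the remaining top block.  */
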
/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
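
/* A usage sketch (hypothetical BUF and XVAL locals, not from the
   original file):

     HOST_WIDE_INT xval[1] = { 5 };
     HOST_WIDE_INT buf[WIDE_INT_MAX_ELTS];
     unsigned int len = wi::from_array (buf, xval, 1, 32, true);

   Here LEN is 1 and BUF[0] is 5; canonization is a no-op for this
   already-canonical input.  */
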
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
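
/* For example (illustrative, assuming a little-endian target with
   8-bit units): a 2-byte buffer { 0x01, 0x02 } yields a 16-bit
   wide_int whose low block is 0x0201, since byte 0 lands at bit
   position 0 and byte 1 at bit position 8.  */
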
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
         won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
                             &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
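
/* For instance, at 8 bits: wi::max_value (8, SIGNED) is 127,
   wi::min_value (8, SIGNED) is -128, wi::max_value (8, UNSIGNED) is
   255 and wi::min_value (8, UNSIGNED) is 0.  */
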
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
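
/* An illustrative case: zero-extending the 32-bit value -1 (stored as
   the single block { -1 }) from OFFSET == 8 gives LEN == 0 and
   SUBOFFSET == 8, so VAL[0] becomes zext_hwi (-1, 8) == 0xff and the
   canonized result represents 255.  */
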
/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        {
          val[len++] = 0;
          return len;
        }
      return canonize (val, len, precision);
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
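
/* For example, wi::mask (val, 8, false, 32) stores 0xff in val[0] and
   returns 1; with NEGATE true the stored block is ~0xff instead.  */
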
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* case 000111000 */
          block = (HOST_WIDE_INT_1U << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* ...111000 */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
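
/* For example, wi::shifted_mask (val, 4, 8, false, 32) stores 0xff0 in
   val[0] and returns 1: bits 4 through 11 are ones and everything else
   up to the precision is zero.  */
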
/*
 * logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
        {
          l0 = l1;
          len = l1 + 1;
        }
      else
        {
          need_canon = false;
          while (l0 > l1)
            {
              val[l0] = op0[l0];
              l0--;
            }
        }
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
        len = l0 + 1;
      else
        {
          need_canon = false;
          while (l1 > l0)
            {
              val[l1] = ~op1[l1];
              l1--;
            }
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
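
/* An unsigned overflow example (illustrative, assuming 64-bit blocks):
   adding 0x8000000000000000 to itself at PREC == 64 wraps to 0 with
   *OVERFLOW set, because the carry out of the top block is detected by
   the X < O0 comparison above.  */
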
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
                     | ((unsigned HOST_WIDE_INT) input[i + 1]
                        << HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}

/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
            {
              val[2] = 0;
              return 3;
            }
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          if (high)
            val[0] = upper;
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
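
/* To make the digit arithmetic concrete (a sketch, assuming 64-bit
   HOST_WIDE_INTs so that B == 2^32): dividing the two-digit value
   { 0x00000000, 0x00000001 } (i.e. 2^32) by the single digit 2 takes
   the N == 1 path above and produces the quotient digits
   { 0x80000000, 0 } with remainder 0.  */
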
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
        {
          quotient[0] = o0 / o1;
          quotient_len = canonize_uhwi (quotient, dividend_prec);
        }
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = canonize_uhwi (remainder, dividend_prec);
        }
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
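
/* Behaviour at the corners (illustrative): 7 / 3 gives quotient 2 and
   remainder 1 for both signednesses, while -7 / 3 under SIGNED gives
   quotient -2 and remainder -1, matching the rule above that the
   remainder always has the sign of the dividend.  */
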
/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
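
/* For example, shifting the single block { 1 } left by 64 at
   PRECISION == 128 takes the SKIP == 1, SMALL_SHIFT == 0 path and
   produces the two blocks { 0, 1 }.  */
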
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}

/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero.  */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}

/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}

/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}

/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}

/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}

/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}

/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}

/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

#if CHECKING_P

namespace selftest {

/* Selftests for wide ints.  We run these multiple times, once per type.  */

/* Helper function for building a test value.  */

template <class VALUE_TYPE>
static VALUE_TYPE from_int (int i);

/* Specializations of the fixture for each wide-int type.  */

/* Specialization for VALUE_TYPE == wide_int.  */

template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int.  */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int.  */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}

/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10).  */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (wi, buf, sgn);
  ASSERT_STREQ (expected, buf);
}

/* Likewise for base 16.  */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_hex (wi, buf);
  ASSERT_STREQ (expected, buf);
}

/* Verify that print_dec and print_hex work for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
}

/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax, and
   overloaded-operators.  */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}

/* Verify that various comparisons work correctly for VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int <VALUE_TYPE> (7);
  VALUE_TYPE b = from_int <VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* Three-way comparisons.  */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}

/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}

/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
}

} // namespace selftest
#endif /* CHECKING_P */