/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
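
/* Illustrative values (a sketch only, assuming a 64-bit HOST_WIDE_INT):

     BLOCKS_NEEDED (1)   == 1
     BLOCKS_NEEDED (64)  == 1
     BLOCKS_NEEDED (65)  == 2
     SIGN_MASK (13)      == 0
     SIGN_MASK (-13)     == -1

   SIGN_MASK yields the HOST_WIDE_INT that would sign-extend its
   argument, which is how the representation compresses upper blocks.  */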
/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block I does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
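
/* For illustration only: with a 64-bit HOST_WIDE_INT, the value 1 held
   in two blocks {1, 0} at precision 128 canonizes to length 1, since
   block 0 already sign-extends to the dropped upper block.  Conversely
   {-1, 0} (the unsigned value 2^64 - 1) must keep both blocks, because
   dropping the zero block would change the implied sign extension.  */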
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
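
/* A hypothetical walk-through, assuming a target with BYTES_BIG_ENDIAN
   and WORDS_BIG_ENDIAN both false: from_buffer on the two-byte buffer
   {0x34, 0x12} computes offset == byte for each byte, so byte 0 lands
   at bit position 0 and byte 1 at bit position 8, giving the 16-bit
   value 0x1234.  */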
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the -0x80..0 edge case that
	 unary minus won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof(HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
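
/* For example (purely illustrative):

     max_value (8, UNSIGNED) == 0xff    max_value (8, SIGNED) == 0x7f
     min_value (8, UNSIGNED) == 0x00    min_value (8, SIGNED) == -0x80  */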
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
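
/* Illustrative behaviour of selt: for A == {-1} with LEN == 1 and
   BLOCKS_NEEDED == 1, reading INDEX == 1 (a block beyond the precision,
   as can happen when the other comparison operand is wider) yields -1
   under SIGNED but 0 under UNSIGNED, matching the infinite sign or
   zero extension of the value.  */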
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
	return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
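
/* A purely illustrative use: with 32-bit X == 0 and Y == 0xf,
   wi::insert (x, y, 8, 4) builds MASK == 0xf00, shifts Y to 0xf00 and
   returns 0xf00; bits of X outside the mask are preserved by the
   bit_and_not step.  */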
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
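
/* For example (illustrative, using the wide_int-returning wrappers of
   these workers at 32-bit precision):

     wi::mask (8, false, 32)             -> 0x000000ff
     wi::mask (8, true, 32)              -> 0xffffff00
     wi::shifted_mask (4, 8, false, 32)  -> 0x00000ff0  */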
/*
 * Logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
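
/* Illustrative: when OP0 has more blocks than OP1, the missing upper
   blocks of OP1 behave as OP1MASK (all zeros or all ones).  For
   example, {0x1234, 0x5678} ^ {-1} at 128-bit precision XORs the upper
   block with -1, exactly as if OP1 had been materialized at full
   length; the result is the bitwise complement of OP0.  */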
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
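
/* Sketch of the unsigned overflow check above, with 64-bit blocks:
   adding 0xffff...ff + 1 gives X == 0 with a carry out, and X < O0
   (0 < 0xffff...ff) reports the overflow; when a carry came into the
   top block, X == O0 also indicates overflow, hence the X <= O0
   variant.  */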
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
	| ((unsigned HOST_WIDE_INT)input[i + 1]
	   << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
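
/* Illustrative round trip on a host with a 64-bit HOST_WIDE_INT and
   32-bit halves: wi_unpack splits the block 0x1122334455667788 into
   the half blocks {0x55667788, 0x11223344}, and wi_pack reassembles
   them into the original block.  */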
/* Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  */
  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
	       &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) & ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
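
/* Illustrative overflow check at a small precision: with PREC == 8 and
   SIGNED operands, 16 * 16 takes the half-wide fast path above; R is
   256, and sext_hwi (256, 8) == 0 != 256, so *OVERFLOW is set.  */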
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of
     a HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
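
/* Sketch of the digit arithmetic, assuming 32-bit half blocks: the
   base is B == 2^32, so dividing the two-digit value {lo, hi} by a
   one-digit divisor D walks the digits top down, forming
   (K * B + digit) / D at each step with K the running remainder,
   exactly as in the N == 1 case above.  */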
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
	quotient[0] = o0 / o1;
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = 1;
	}
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
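
/* Note on the carry expression above: SMALL_SHIFT is nonzero on this
   path, so -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is simply
   HOST_BITS_PER_WIDE_INT - SMALL_SHIFT (the arithmetic is done on
   unsigned int), i.e. the bits shifted out of the top of each block.  */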
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero. */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
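
/* Illustrative difference between the two right shifts: for an 8-bit
   value 0x80 shifted right by 1, lrshift treats it as 128 and yields
   0x40, while arshift treats it as -128 and yields -64 (0xc0).  */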
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
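
/* Worked examples for the bit-scan routines (illustrative): for a
   16-bit X == 0x00f0, clz (x) == 8, ctz (x) == 4, ffs (x) == 5,
   floor_log2 (x) == 7 and clrsb (x) == 7 (seven copies of the zero
   sign bit below bit 15).  */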
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;