/* Operations with very long integers.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"

#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
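
/* Editorial worked example (assuming the usual 64-bit HOST_WIDE_INT):
   BLOCKS_NEEDED (70) is (70 + 63) / 64 == 2, and the degenerate PREC == 0
   still gets one block.  SIGN_MASK (X) is -1 when the top bit of X is set
   and 0 otherwise, e.g. SIGN_MASK (0x8000000000000000) == -1.  */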
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
        {
          if (SIGN_MASK (x) == top)
            return i + 1;

          /* We need an extra block because the top bit of block i does
             not match the extension.  */
          return i + 2;
        }
    }

  /* The number is 0 or -1.  */
  return 1;
}
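
/* Editorial example for canonize above (64-bit blocks, precision 128):
   the value 1 stored as { 1, 0 } canonizes to the single block { 1 },
   and -1 stored as { -1, -1 } canonizes to { -1 }.  Trailing blocks that
   merely repeat the sign extension of the block below them are dropped,
   so equal values always have equal representations.  */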
/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
        {
          unsigned int word = byte / UNITS_PER_WORD;

          if (WORDS_BIG_ENDIAN)
            word = (words - 1) - word;

          offset = word * UNITS_PER_WORD;

          if (BYTES_BIG_ENDIAN)
            offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
          else
            offset += byte % UNITS_PER_WORD;
        }
      else
        offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
         won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
        t[i] = ~v[i];
      if (excess > 0)
        t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
        t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
        mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
        mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();

  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
                             &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }

  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
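
/* Editorial illustration: max_value (8, UNSIGNED) is 255 and
   max_value (8, SIGNED) is 127, while min_value (8, UNSIGNED) is 0 and
   min_value (8, SIGNED) is -128, i.e. a lone bit 7.  */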
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
            {
              while (len < BLOCKS_NEEDED (xprecision))
                val[len++] = -1;
              if (small_xprecision)
                val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
              else
                val[len++] = 0;
            }
        }
      else
        {
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
        }
    }
  len = canonize (val, len, precision);

  return len;
}
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
            ? sext_hwi (val, small_prec)
            : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
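
/* Editorial example: top_bit_of returns the value of bit PREC - 1 of A,
   so with precision 4 and a single block holding 0x8 it returns 1, and
   for 0x7 it returns 0.  */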
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                const HOST_WIDE_INT *op1, unsigned int op1len,
                unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
         do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
        return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
        return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return true;
      if (u0 > u1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
        return -1;
      if (u0 > u1)
        return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                 unsigned int precision,
                 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return true;
      if (x0 > x1)
        return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
                unsigned int precision,
                const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
        return -1;
      if (x0 > x1)
        return 1;
      l--;
    }

  return 0;
}
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
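
/* Editorial example (64-bit blocks): with precision 64, sign-extending the
   single block 0x80 from bit offset 8 yields 0xffffffffffffff80 (-128),
   since bit 7 is copied upwards, while 0x7f is left unchanged (127).  */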
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
        val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
            unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
         a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
        val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
         that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
        {
          val[len++] = 0;
          return len;
        }
      return canonize (val, len, precision);
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
        val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
          unsigned int prec)
{
  if (width == prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
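
/* Editorial example for wi::mask above: a width of 3 within precision 8
   sets the low three bits (binary 00000111, i.e. 7); with NEGATE true the
   complement within the precision is produced instead (0xf8).  */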
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
        {
          /* The run of ones starts and ends within this block.  */
          block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
          val[i++] = negate ? ~block : block;
          return i;
        }
      else
        /* The run of ones starts in this block and continues upward.  */
        val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
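
/* Editorial example for wi::shifted_mask above: start 4, width 3 within
   precision 16 gives ones exactly in bits 4..6, i.e. the value 0x70; with
   NEGATE true those bits are the zeros and the rest of the 16-bit
   precision is ones (0xff8f).  */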
/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... OP1's implicit extension blocks are either all zeros (the
         result can be truncated to L1 + 1 blocks) or all ones (OP0's
         upper blocks are copied and NEED_CANON cleared).  */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... likewise, with the roles of OP0 and OP1 swapped.  */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                   unsigned int op0len, const HOST_WIDE_INT *op1,
                   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... if ~OP1's extension blocks are all ones, OP0's upper blocks
         are copied and NEED_CANON cleared; otherwise the result is
         truncated to L1 + 1 blocks.  */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... likewise for OP0's extension against ~OP1's upper blocks.  */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
              unsigned int op0len, const HOST_WIDE_INT *op1,
              unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... OP1's implicit extension blocks are either all zeros (OP0's
         upper blocks are copied and NEED_CANON cleared) or all ones (the
         result can be truncated to L1 + 1 blocks).  */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... likewise, with the roles of OP0 and OP1 swapped.  */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
                  unsigned int op0len, const HOST_WIDE_INT *op1,
                  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      /* ... if ~OP1's extension blocks are all zeros, OP0's upper blocks
         are copied and NEED_CANON cleared; otherwise the result is
         truncated to L1 + 1 blocks.  */
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      /* ... likewise for OP0's extension against ~OP1's upper blocks.  */
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
          l0--;
        }
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
          l1--;
        }
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_carry)
            *overflow = (x <= o0);
          else
            *overflow = (x < o0);
        }
    }

  return canonize (val, len, prec);
}
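
/* Editorial overflow example (precision 8): under UNSIGNED arithmetic
   0xff + 0x01 wraps to 0x00 and sets *OVERFLOW, because the carry out of
   bit 7 is lost; the same bit pattern under SIGNED arithmetic is
   -1 + 1 == 0 with no overflow.  */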
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
           unsigned int in_len, unsigned int out_len,
           unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
        x = sext_hwi (x, small_prec);
      else
        x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
         const unsigned HOST_HALF_WIDE_INT *input,
         unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
        | ((unsigned HOST_WIDE_INT)input[i + 1]
           << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
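
/* Editorial round-trip sketch (64-bit HOST_WIDE_INT, 32-bit halves):
   wi_unpack splits the block 0x0000000100000002 into the halves
   { 0x2, 0x1 }, least significant first, and wi_pack reassembles those
   halves into the original block.  An odd IN_LEN is handled by
   zero-extending the final half.  */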
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
                  unsigned int op1len, const HOST_WIDE_INT *op2val,
                  unsigned int op2len, unsigned int prec, signop sgn,
                  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
         least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
          && wi::fits_uhwi_p (op1)
          && wi::fits_uhwi_p (op2))
        {
          /* This case never overflows.  */
          if (high)
            {
              val[0] = 0;
              return 1;
            }
          umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
          if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
            {
              val[2] = 0;
              return 3;
            }
          return 1 + (val[1] != 0 || val[0] < 0);
        }
      /* Likewise if the output is a full single HWI, except that the
         upper HWI of the result is only used for determining overflow.
         (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT upper;
          umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
          if (needs_overflow)
            *overflow = (upper != 0);
          if (high)
            val[0] = upper;
          return 1;
        }
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op2len; i++)
        val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
        {
          val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
          return 1;
        }
      for (i = 0; i < op1len; i++)
        val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
        {
          o0 = op1.to_shwi ();
          o1 = op2.to_shwi ();
        }
      else
        {
          o0 = op1.to_uhwi ();
          o1 = op2.to_uhwi ();
        }

      r = o0 * o1;
      if (needs_overflow)
        {
          if (sgn == SIGNED)
            {
              if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
                *overflow = true;
            }
          else
            {
              if ((r >> prec) != 0)
                *overflow = true;
            }
        }
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
          * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
        {
          t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
               + r[i + j] + k);
          r[i + j] = t & HALF_INT_MASK;
          k = t >> HOST_BITS_PER_HALF_WIDE_INT;
        }
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)v[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
      if (wi::neg_p (op2))
        {
          b = 0;
          for (i = 0; i < half_blocks_needed; i++)
            {
              t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
                - (unsigned HOST_WIDE_INT)u[i] - b;
              r[i + half_blocks_needed] = t & HALF_INT_MASK;
              b = t >> (HOST_BITS_PER_WIDE_INT - 1);
            }
        }
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
         For signed, overflow is true if any of the top bits are not equal
         to the sign bit.  */
      if (sgn == UNSIGNED)
        top = 0;
      else
        {
          top = r[(half_blocks_needed) - 1];
          top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
          top &= mask;
        }

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
        if (((HOST_WIDE_INT)(r[i] & mask)) != top)
          *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
               &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
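
/* Editorial note on the core loop above: it is the classic schoolbook
   algorithm on half-width digits.  With 32-bit halves, each partial
   product plus the incoming digit and carry fits in an unsigned 64-bit
   HOST_WIDE_INT, since
   0xffffffff * 0xffffffff + 0xffffffff + 0xffffffff == 0xffffffffffffffff,
   so no intermediate result can overflow.  */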
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
        count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
               unsigned int op0len, const HOST_WIDE_INT *op1,
               unsigned int op1len, unsigned int prec,
               signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
        *overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
        {
          unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
          *overflow = (HOST_WIDE_INT) (x << shift) < 0;
        }
      else
        {
          /* Put the MSB of X and O0 in the top of the HWI.  */
          x <<= shift;
          o0 <<= shift;
          if (old_borrow)
            *overflow = (x >= o0);
          else
            *overflow = (x > o0);
        }
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
                   unsigned HOST_HALF_WIDE_INT *b_remainder,
                   unsigned HOST_HALF_WIDE_INT *b_dividend,
                   unsigned HOST_HALF_WIDE_INT *b_divisor,
                   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
        {
          b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
          k = ((k * b + b_dividend[j])
               - ((unsigned HOST_WIDE_INT)b_quotient[j]
                  * (unsigned HOST_WIDE_INT)b_divisor[0]));
        }
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
         algorithm, we can overwrite b_dividend and b_divisor, so we do
         that.  */
      for (i = n - 1; i > 0; i--)
        b_divisor[i] = (b_divisor[i] << s)
          | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
        b_dividend[i] = (b_dividend[i] << s)
          | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
        {
          qhat -= 1;
          rhat += b_divisor[n-1];
          if (rhat < b)
            goto again;
        }

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
        {
          p = qhat * b_divisor[i];
          t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
          b_dividend[i + j] = t;
          k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
               - (t >> HOST_BITS_PER_HALF_WIDE_INT));
        }
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
        {
          /* The estimate was one too large; add back one multiple of the
             divisor.  */
          b_quotient[j] -= 1;
          k = 0;
          for (i = 0; i < n; i++)
            {
              t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
              b_dividend[i+j] = t;
              k = t >> HOST_BITS_PER_HALF_WIDE_INT;
            }
          b_dividend[j+n] += k;
        }
    }

  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
        | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
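
/* Editorial note: the routine above follows Knuth's Algorithm D (TAOCP
   volume 2, section 4.3.1) as presented in Hacker's Delight: normalize so
   the top divisor digit has its high bit set, estimate each quotient digit
   from the top two dividend digits, multiply-subtract, and correct a rare
   over-estimate with the add-back step.  */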
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
                     HOST_WIDE_INT *remainder,
                     const HOST_WIDE_INT *dividend_val,
                     unsigned int dividend_len, unsigned int dividend_prec,
                     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
                     unsigned int divisor_prec, signop sgn,
                     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
                                           dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
                                          divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
        {
          *remainder_len = 1;
          remainder[0] = 0;
        }
      if (oflow != 0)
        *oflow = true;
      if (quotient)
        for (unsigned int i = 0; i < dividend_len; ++i)
          quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
        {
          gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
          if (quotient)
            {
              quotient[0] = HOST_WIDE_INT_MIN;
              quotient[1] = 0;
            }
          if (remainder)
            {
              remainder[0] = 0;
              *remainder_len = 1;
            }
          return 2;
        }
      else
        {
          if (quotient)
            quotient[0] = o0 / o1;
          if (remainder)
            {
              remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
          return 1;
        }
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
        quotient[0] = o0 / o1;
      if (remainder)
        {
          remainder[0] = o0 % o1;
          *remainder_len = 1;
        }
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
        {
          neg_dividend = -dividend;
          dividend = neg_dividend;
          dividend_neg = true;
        }
      if (wi::neg_p (divisor))
        {
          neg_divisor = -divisor;
          divisor = neg_divisor;
          divisor_neg = true;
        }
    }

  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
             dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
             divisor_blocks_needed, divisor_prec, sgn);

  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
         neg.  */
      if (dividend_neg != divisor_neg)
        quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
                                      quotient_len, dividend_prec,
                                      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
        *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
                                        *remainder_len, dividend_prec,
                                        UNSIGNED, 0);
    }

  return quotient_len;
}
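
/* Editorial sketch of the sign handling above: for SIGNED division both
   operands are made non-negative first, the unsigned quotient is negated
   when exactly one operand was negative, and the remainder takes the sign
   of the dividend, so e.g. -7 / 2 == -3 with remainder -1 (truncating
   division).  */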
/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL.  The other output blocks contain bits from two
         consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
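
/* Editorial shift example (64-bit blocks, precision 128): shifting { 1 }
   left by 68 gives SKIP == 1 and SMALL_SHIFT == 4, so block 0 becomes 0
   and block 1 becomes 1 << 4 == 0x10, i.e. the value 2^68.  */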
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int xprecision,
                     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero.  */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
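
/* Editorial example: for a 128-bit X whose only set bit is bit 3, the
   single stored block is 8, COUNT is 128 - 64 == 64 unrepresented leading
   zeros, and clz_hwi (8) adds another 60, giving 124.  */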
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is zero.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
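
/* Editorial example: a 128-bit value stored as { 0, 0x10 }, i.e.
   0x10 * 2^64 == 2^68, has CRUX == 1, a zero lower block, and
   ::exact_log2 (0x10) == 4, so the result is 4 + 64 == 68; for
   0x18 * 2^64 the result would be -1.  */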
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;